Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 3
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/amba/bus.c | 12
-rw-r--r-- drivers/clocksource/Kconfig | 20
-rw-r--r-- drivers/clocksource/Makefile | 3
-rw-r--r-- drivers/clocksource/clksrc-dbx500-prcmu.c | 23
-rw-r--r-- drivers/clocksource/db5500-mtimer.c | 67
-rw-r--r-- drivers/cpufreq/Makefile | 3
-rw-r--r-- drivers/cpufreq/cpufreq.c | 21
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 167
-rw-r--r-- drivers/cpufreq/db8500-cpufreq.c | 170
-rw-r--r-- drivers/cpufreq/dbx500-cpufreq.c | 340
-rw-r--r-- drivers/crypto/Kconfig | 11
-rw-r--r-- drivers/crypto/Makefile | 1
-rw-r--r-- drivers/crypto/ux500/Kconfig | 29
-rw-r--r-- drivers/crypto/ux500/Makefile | 8
-rw-r--r-- drivers/crypto/ux500/cryp/Makefile | 13
-rw-r--r-- drivers/crypto/ux500/cryp/cryp.c | 418
-rw-r--r-- drivers/crypto/ux500/cryp/cryp.h | 308
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_core.c | 2313
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_irq.c | 45
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_irq.h | 31
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_irqp.h | 125
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_p.h | 124
-rw-r--r-- drivers/crypto/ux500/hash/Makefile | 11
-rw-r--r-- drivers/crypto/ux500/hash/hash_alg.h | 387
-rwxr-xr-x drivers/crypto/ux500/hash/hash_alg_p.h | 26
-rw-r--r-- drivers/crypto/ux500/hash/hash_core.c | 2080
-rw-r--r-- drivers/dma/ste_dma40.c | 68
-rw-r--r-- drivers/dma/ste_dma40_ll.c | 26
-rw-r--r-- drivers/dma/ste_dma40_ll.h | 4
-rw-r--r-- drivers/gpio/Kconfig | 2
-rw-r--r-- drivers/gpio/gpio-ab8500.c | 287
-rw-r--r-- drivers/gpio/gpio-nomadik.c | 124
-rw-r--r-- drivers/gpu/Makefile | 1
-rw-r--r-- drivers/gpu/mali/Kconfig | 15
-rw-r--r-- drivers/gpu/mali/Makefile | 10
-rw-r--r-- drivers/gpu/mali/mali400ko/.gitignore | 32
-rw-r--r-- drivers/gpu/mali/mali400ko/Makefile | 102
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h | 236
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h | 31
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h | 48
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile | 346
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common | 56
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform | 45
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h | 87
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h | 94
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h | 87
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h | 79
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h | 93
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h | 107
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h | 121
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h | 109
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c | 370
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h | 18
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c | 1418
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c | 1187
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h | 171
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c | 892
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h | 134
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c | 183
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h | 99
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h | 21
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c | 515
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h | 25
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h | 17
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c | 1425
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c | 2924
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h | 66
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c | 309
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h | 37
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c | 348
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h | 145
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h | 21
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c | 240
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h | 46
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c | 2032
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h | 355
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h | 19
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h | 107
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c | 207
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h | 44
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c | 29
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h | 1620
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h | 166
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h | 184
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h | 252
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h | 1148
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h | 710
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c | 921
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h | 323
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c | 243
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h | 155
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c | 81
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h | 62
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c | 461
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h | 80
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c | 718
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h | 296
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h | 66
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h | 31
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c | 82
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h | 19
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h | 77
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c | 565
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h | 29
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c | 786
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h | 19
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c | 72
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h | 57
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h | 37
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c | 55
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c | 86
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h | 48
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c | 228
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c | 271
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c | 578
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c | 98
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c | 22
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c | 50
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c | 51
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c | 191
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c | 195
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h | 32
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c | 51
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c | 65
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c | 142
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c | 128
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c | 336
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c | 103
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c | 135
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c | 41
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h | 70
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c | 50
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c | 61
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c | 388
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h | 100
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c | 190
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c | 159
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt | 28
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h | 170
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h | 219
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c | 13
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h | 48
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c | 13
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h | 26
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile | 115
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common | 19
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h | 18
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c | 329
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c | 387
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h | 126
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c | 166
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h | 91
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h | 49
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c | 194
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h | 35
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h | 48
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h | 141
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h | 51
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h | 31
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h | 49
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c | 409
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h | 18
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c | 273
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h | 23
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c | 245
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h | 23
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c | 70
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c | 27
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c | 243
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c | 37
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c | 76
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h | 35
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c | 173
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h | 41
-rw-r--r-- drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt | 17
-rw-r--r-- drivers/gpu/mali/mali400ko/mali.spec | 57
-rw-r--r-- drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt | 24
-rw-r--r-- drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile | 17
-rw-r--r-- drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c | 158
-rw-r--r-- drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h | 25
-rw-r--r-- drivers/hwmon/Kconfig | 38
-rw-r--r-- drivers/hwmon/Makefile | 3
-rw-r--r-- drivers/hwmon/ab5500.c | 212
-rw-r--r-- drivers/hwmon/ab8500.c | 184
-rw-r--r-- drivers/hwmon/abx500.c | 698
-rw-r--r-- drivers/hwmon/abx500.h | 95
-rw-r--r-- drivers/hwmon/dbx500.c | 402
-rw-r--r-- drivers/hwmon/hwmon.c | 21
-rw-r--r-- drivers/i2c/busses/i2c-nomadik.c | 57
-rw-r--r-- drivers/input/keyboard/Kconfig | 12
-rw-r--r-- drivers/input/keyboard/Makefile | 3
-rw-r--r-- drivers/input/keyboard/db5500_keypad.c | 799
-rw-r--r-- drivers/input/keyboard/gpio_keys.c | 21
-rw-r--r-- drivers/input/keyboard/nomadik-ske-keypad.c | 770
-rw-r--r-- drivers/input/keyboard/stmpe-keypad.c | 89
-rw-r--r-- drivers/input/misc/Kconfig | 32
-rw-r--r-- drivers/input/misc/Makefile | 3
-rw-r--r-- drivers/input/misc/ab5500-accdet.c | 284
-rw-r--r-- drivers/input/misc/ab8500-accdet.c | 464
-rw-r--r-- drivers/input/misc/ab8500-ponkey.c | 213
-rw-r--r-- drivers/input/misc/abx500-accdet.c | 1019
-rw-r--r-- drivers/input/misc/ste_ff_vibra.c | 234
-rw-r--r-- drivers/input/touchscreen/bu21013_ts.c | 499
-rw-r--r-- drivers/input/touchscreen/synaptics_i2c_rmi.c | 675
-rw-r--r-- drivers/leds/Kconfig | 8
-rw-r--r-- drivers/leds/Makefile | 1
-rw-r--r-- drivers/leds/leds-ab5500.c | 811
-rw-r--r-- drivers/leds/leds-lm3530.c | 25
-rw-r--r-- drivers/leds/leds-lp5521.c | 8
-rw-r--r-- drivers/leds/leds-pwm.c | 61
-rw-r--r-- drivers/mfd/Kconfig | 46
-rw-r--r-- drivers/mfd/Makefile | 4
-rw-r--r-- drivers/mfd/ab5500-core.c | 72
-rw-r--r-- drivers/mfd/ab5500-gpadc.c | 1256
-rw-r--r-- drivers/mfd/ab5500-power.c | 96
-rw-r--r-- drivers/mfd/ab8500-core.c | 72
-rw-r--r-- drivers/mfd/ab8500-debugfs.c | 1245
-rw-r--r-- drivers/mfd/ab8500-denc.c | 539
-rw-r--r-- drivers/mfd/ab8500-gpadc.c | 90
-rw-r--r-- drivers/mfd/ab8500-i2c.c | 1
-rw-r--r-- drivers/mfd/ab8500-sysctrl.c | 135
-rw-r--r-- drivers/mfd/abx500-core.c | 16
-rw-r--r-- drivers/mfd/db5500-prcmu-regs.h | 141
-rw-r--r-- drivers/mfd/db5500-prcmu.c | 2055
-rw-r--r-- drivers/mfd/db8500-prcmu.c | 141
-rw-r--r-- drivers/mfd/dbx500-prcmu-regs.h | 17
-rw-r--r-- drivers/mfd/stmpe.c | 18
-rw-r--r-- drivers/mfd/tc35892.c | 503
-rw-r--r-- drivers/mfd/tc3589x.c | 131
-rw-r--r-- drivers/mfd/tps6105x.c | 1
-rw-r--r-- drivers/misc/Kconfig | 43
-rw-r--r-- drivers/misc/Kconfig.stm | 120
-rw-r--r-- drivers/misc/Makefile | 5
-rw-r--r-- drivers/misc/ab8500-pwm.c | 211
-rw-r--r-- drivers/misc/bh1780gli.c | 192
-rw-r--r-- drivers/misc/clonedev/Makefile | 5
-rw-r--r-- drivers/misc/clonedev/clonedev.c | 312
-rw-r--r-- drivers/misc/compdev/Makefile | 6
-rw-r--r-- drivers/misc/compdev/compdev.c | 1381
-rw-r--r-- drivers/misc/dispdev/Makefile | 1
-rw-r--r-- drivers/misc/dispdev/dispdev.c | 659
-rw-r--r-- drivers/misc/hwmem/Makefile | 3
-rw-r--r-- drivers/misc/hwmem/cache_handler.c | 510
-rw-r--r-- drivers/misc/hwmem/cache_handler.h | 61
-rw-r--r-- drivers/misc/hwmem/contig_alloc.c | 571
-rw-r--r-- drivers/misc/hwmem/hwmem-ioctl.c | 532
-rw-r--r-- drivers/misc/hwmem/hwmem-main.c | 726
-rw-r--r-- drivers/misc/stm.c | 850
-rw-r--r-- drivers/mmc/card/block.c | 110
-rw-r--r-- drivers/mmc/card/mmc_test.c | 180
-rw-r--r-- drivers/mmc/core/core.c | 35
-rw-r--r-- drivers/mmc/core/core.h | 1
-rw-r--r-- drivers/mmc/core/host.c | 1
-rw-r--r-- drivers/mmc/host/mmci.c | 478
-rw-r--r-- drivers/mmc/host/mmci.h | 9
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 51
-rw-r--r-- drivers/power/Kconfig | 19
-rw-r--r-- drivers/power/Makefile | 1
-rw-r--r-- drivers/power/ab5500_btemp.c | 994
-rw-r--r-- drivers/power/ab5500_charger.c | 1820
-rw-r--r-- drivers/power/ab5500_fg.c | 1954
-rw-r--r-- drivers/power/ab8500_btemp.c | 19
-rw-r--r-- drivers/power/ab8500_charger.c | 198
-rw-r--r-- drivers/power/ab8500_fg.c | 12
-rw-r--r-- drivers/power/abx500_chargalg.c | 7
-rw-r--r-- drivers/regulator/Kconfig | 8
-rw-r--r-- drivers/regulator/Makefile | 1
-rw-r--r-- drivers/regulator/ab5500.c | 650
-rw-r--r-- drivers/regulator/ab8500-debug.c | 2083
-rw-r--r-- drivers/regulator/ab8500-debug.h | 80
-rw-r--r-- drivers/regulator/ab8500-ext.c | 451
-rw-r--r-- drivers/regulator/ab8500.c | 1052
-rw-r--r-- drivers/regulator/core.c | 56
-rw-r--r-- drivers/regulator/db5500-prcmu.c | 334
-rw-r--r-- drivers/regulator/dbx500-prcmu.c | 167
-rw-r--r-- drivers/rtc/Kconfig | 8
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/rtc-ab.c | 485
-rw-r--r-- drivers/rtc/rtc-ab8500.c | 53
-rw-r--r-- drivers/rtc/rtc-pl031.c | 14
-rw-r--r-- drivers/spi/Kconfig | 14
-rw-r--r-- drivers/spi/Makefile | 1
-rw-r--r-- drivers/spi/spi-pl022.c | 17
-rw-r--r-- drivers/spi/stm_msp.c | 1929
-rw-r--r-- drivers/staging/android/Kconfig | 18
-rw-r--r-- drivers/staging/android/Makefile | 2
-rw-r--r-- drivers/staging/android/ab5500-timed-vibra.c | 490
-rw-r--r-- drivers/staging/android/ste_timed_vibra.c | 431
-rw-r--r-- drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c | 5
-rw-r--r-- drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c | 316
-rw-r--r-- drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h | 1
-rw-r--r-- drivers/tee/Kconfig | 13
-rw-r--r-- drivers/tee/Makefile | 8
-rw-r--r-- drivers/tee/tee_driver.c | 692
-rw-r--r-- drivers/tee/tee_service.c | 17
-rw-r--r-- drivers/tty/serial/Kconfig | 8
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 434
-rw-r--r-- drivers/usb/core/driver.c | 78
-rw-r--r-- drivers/usb/core/hcd.c | 3
-rw-r--r-- drivers/usb/core/hub.c | 286
-rw-r--r-- drivers/usb/core/notify.c | 7
-rw-r--r-- drivers/usb/core/otg_whitelist.h | 68
-rw-r--r-- drivers/usb/core/usb.h | 8
-rw-r--r-- drivers/usb/gadget/Kconfig | 11
-rw-r--r-- drivers/usb/gadget/epautoconf.c | 6
-rw-r--r-- drivers/usb/gadget/f_mass_storage.c | 1
-rw-r--r-- drivers/usb/gadget/f_rndis.c | 4
-rw-r--r-- drivers/usb/musb/Kconfig | 13
-rw-r--r-- drivers/usb/musb/musb_core.c | 63
-rw-r--r-- drivers/usb/musb/musb_core.h | 17
-rw-r--r-- drivers/usb/musb/musb_debugfs.c | 1
-rw-r--r-- drivers/usb/musb/musb_gadget.c | 33
-rw-r--r-- drivers/usb/musb/musb_gadget_ep0.c | 54
-rw-r--r-- drivers/usb/musb/musb_host.c | 102
-rw-r--r-- drivers/usb/musb/musb_regs.h | 4
-rw-r--r-- drivers/usb/musb/musb_virthub.c | 2
-rw-r--r-- drivers/usb/musb/ux500.c | 515
-rw-r--r-- drivers/usb/musb/ux500_dma.c | 125
-rw-r--r-- drivers/usb/otg/Kconfig | 9
-rw-r--r-- drivers/usb/otg/Makefile | 1
-rw-r--r-- drivers/usb/otg/ab5500-usb.c | 681
-rw-r--r-- drivers/usb/otg/ab8500-usb.c | 1014
-rw-r--r-- drivers/video/Kconfig | 6
-rw-r--r-- drivers/video/Makefile | 3
-rw-r--r-- drivers/video/av8100/Kconfig | 48
-rw-r--r-- drivers/video/av8100/Makefile | 10
-rw-r--r-- drivers/video/av8100/av8100.c | 4166
-rw-r--r-- drivers/video/av8100/av8100_regs.h | 346
-rw-r--r-- drivers/video/av8100/hdmi.c | 2479
-rw-r--r-- drivers/video/av8100/hdmi_loc.h | 75
-rw-r--r-- drivers/video/b2r2/Kconfig | 134
-rw-r--r-- drivers/video/b2r2/Makefile | 15
-rw-r--r-- drivers/video/b2r2/b2r2_api.c | 1643
-rw-r--r-- drivers/video/b2r2/b2r2_blt_main.c | 2840
-rw-r--r-- drivers/video/b2r2/b2r2_control.h | 33
-rw-r--r-- drivers/video/b2r2/b2r2_core.c | 2763
-rw-r--r-- drivers/video/b2r2/b2r2_core.h | 300
-rw-r--r-- drivers/video/b2r2/b2r2_debug.c | 340
-rw-r--r-- drivers/video/b2r2/b2r2_debug.h | 102
-rw-r--r-- drivers/video/b2r2/b2r2_filters.c | 377
-rw-r--r-- drivers/video/b2r2/b2r2_filters.h | 73
-rw-r--r-- drivers/video/b2r2/b2r2_generic.c | 3206
-rw-r--r-- drivers/video/b2r2/b2r2_generic.h | 51
-rw-r--r-- drivers/video/b2r2/b2r2_global.h | 119
-rw-r--r-- drivers/video/b2r2/b2r2_hw.h | 458
-rw-r--r-- drivers/video/b2r2/b2r2_hw_convert.c | 747
-rw-r--r-- drivers/video/b2r2/b2r2_hw_convert.h | 53
-rw-r--r-- drivers/video/b2r2/b2r2_input_validation.c | 496
-rw-r--r-- drivers/video/b2r2/b2r2_input_validation.h | 31
-rw-r--r-- drivers/video/b2r2/b2r2_internal.h | 610
-rw-r--r-- drivers/video/b2r2/b2r2_kernel_if.c | 37
-rw-r--r-- drivers/video/b2r2/b2r2_mem_alloc.c | 669
-rw-r--r-- drivers/video/b2r2/b2r2_mem_alloc.h | 161
-rw-r--r-- drivers/video/b2r2/b2r2_node_gen.c | 83
-rw-r--r-- drivers/video/b2r2/b2r2_node_split.c | 3033
-rw-r--r-- drivers/video/b2r2/b2r2_node_split.h | 124
-rw-r--r-- drivers/video/b2r2/b2r2_profiler/Makefile | 3
-rw-r--r-- drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c | 270
-rw-r--r-- drivers/video/b2r2/b2r2_profiler_api.h | 66
-rw-r--r-- drivers/video/b2r2/b2r2_profiler_socket.c | 107
-rw-r--r-- drivers/video/b2r2/b2r2_profiler_socket.h | 22
-rw-r--r-- drivers/video/b2r2/b2r2_structures.h | 226
-rw-r--r-- drivers/video/b2r2/b2r2_timing.c | 22
-rw-r--r-- drivers/video/b2r2/b2r2_timing.h | 22
-rw-r--r-- drivers/video/b2r2/b2r2_utils.c | 1324
-rw-r--r-- drivers/video/b2r2/b2r2_utils.h | 89
-rw-r--r-- drivers/video/mcde/Kconfig | 96
-rw-r--r-- drivers/video/mcde/Makefile | 23
-rw-r--r-- drivers/video/mcde/display-ab8500.c | 494
-rw-r--r-- drivers/video/mcde/display-av8100.c | 1656
-rw-r--r-- drivers/video/mcde/display-fictive.c | 63
-rw-r--r-- drivers/video/mcde/display-generic_dsi.c | 307
-rw-r--r-- drivers/video/mcde/display-samsung_s6d16d0.c | 232
-rw-r--r-- drivers/video/mcde/display-sony_acx424akp_dsi.c | 422
-rw-r--r-- drivers/video/mcde/display-vuib500-dpi.c | 215
-rw-r--r-- drivers/video/mcde/dsilink_regs.h | 2037
-rw-r--r-- drivers/video/mcde/mcde_bus.c | 274
-rw-r--r-- drivers/video/mcde/mcde_debugfs.c | 207
-rw-r--r-- drivers/video/mcde/mcde_debugfs.h | 25
-rw-r--r-- drivers/video/mcde/mcde_display.c | 366
-rw-r--r-- drivers/video/mcde/mcde_dss.c | 445
-rw-r--r-- drivers/video/mcde/mcde_fb.c | 898
-rw-r--r-- drivers/video/mcde/mcde_hw.c | 3834
-rw-r--r-- drivers/video/mcde/mcde_mod.c | 69
-rw-r--r-- drivers/video/mcde/mcde_regs.h | 5096
-rw-r--r-- drivers/watchdog/Kconfig | 16
-rw-r--r-- drivers/watchdog/Makefile | 1
-rw-r--r-- drivers/watchdog/mpcore_wdt.c | 98
-rw-r--r-- drivers/watchdog/ux500_wdt.c | 454
391 files changed, 122687 insertions(+), 1680 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d236aef7e59..68e36aed05b 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -126,6 +126,8 @@ source "drivers/platform/Kconfig"
source "drivers/clk/Kconfig"
+source "drivers/tee/Kconfig"
+
source "drivers/hwspinlock/Kconfig"
source "drivers/clocksource/Kconfig"
@@ -141,3 +143,4 @@ source "drivers/virt/Kconfig"
source "drivers/devfreq/Kconfig"
endmenu
+
diff --git a/drivers/Makefile b/drivers/Makefile
index 95952c82bf1..4ca756c31be 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -122,6 +122,7 @@ obj-y += platform/
obj-y += ieee802154/
#common clk code
obj-y += clk/
+obj-$(CONFIG_TEE_SUPPORT) += tee/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_NFC) += nfc/
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index cc273226dbd..e98838a060e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -117,7 +117,7 @@ static int amba_legacy_resume(struct device *dev)
#ifdef CONFIG_SUSPEND
-static int amba_pm_suspend(struct device *dev)
+int amba_pm_suspend(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -135,7 +135,7 @@ static int amba_pm_suspend(struct device *dev)
return ret;
}
-static int amba_pm_resume(struct device *dev)
+int amba_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -162,7 +162,7 @@ static int amba_pm_resume(struct device *dev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
-static int amba_pm_freeze(struct device *dev)
+int amba_pm_freeze(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -180,7 +180,7 @@ static int amba_pm_freeze(struct device *dev)
return ret;
}
-static int amba_pm_thaw(struct device *dev)
+int amba_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -198,7 +198,7 @@ static int amba_pm_thaw(struct device *dev)
return ret;
}
-static int amba_pm_poweroff(struct device *dev)
+int amba_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -216,7 +216,7 @@ static int amba_pm_poweroff(struct device *dev)
return ret;
}
-static int amba_pm_restore(struct device *dev)
+int amba_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
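The hunks above drop the static qualifier from the AMBA bus PM callbacks, presumably so code outside bus.c can reuse them. A minimal sketch of how platform code might do that, assuming the symbols are declared in a shared header (the header placement and the wrapper below are illustrative, not part of this patch):

    /* Assumed declaration, e.g. in include/linux/amba/bus.h: */
    extern int amba_pm_suspend(struct device *dev);

    /* Hypothetical platform hook that delegates to the generic AMBA
     * suspend path before doing its own power-domain handling. */
    static int ux500_amba_suspend(struct device *dev)
    {
    	int ret = amba_pm_suspend(dev);	/* generic AMBA handling */

    	if (ret)
    		return ret;

    	/* ... platform-specific power-domain shutdown would go here ... */
    	return 0;
    }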
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 5138927a416..6f86f8ca9b0 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -19,13 +19,27 @@ config DW_APB_TIMER
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer"
depends on UX500_SOC_DB5500 || UX500_SOC_DB8500
- default y
+ default y if UX500_SOC_DB8500
help
Use the always on PRCMU Timer as clocksource
config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
- bool "Clocksource PRCMU Timer sched_clock"
- depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK)
+ bool
+ depends on CLKSRC_DBX500_PRCMU
+ select HAVE_SCHED_CLOCK
+ help
+ Use the always on PRCMU Timer as sched_clock
+
+config CLKSRC_DB5500_MTIMER
+ bool "Clocksource MTIMER"
+ depends on UX500_SOC_DB5500
default y
help
+ Use the always on MTIMER as clocksource
+
+config CLKSRC_DB5500_MTIMER_SCHED_CLOCK
+ bool
+ depends on CLKSRC_DB5500_MTIMER
+ select HAVE_SCHED_CLOCK
+ help
Use the always on PRCMU Timer as sched_clock
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 8d81a1d3265..9b10f6b7536 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
-obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
\ No newline at end of file
+obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
+obj-$(CONFIG_CLKSRC_DB5500_MTIMER) += db5500-mtimer.o
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index c26c369eb9e..dc71e432dc5 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -14,6 +14,9 @@
*/
#include <linux/clockchips.h>
#include <linux/clksrc-dbx500-prcmu.h>
+#ifdef CONFIG_BOOTTIME
+#include <linux/boottime.h>
+#endif
#include <asm/sched_clock.h>
@@ -68,6 +71,23 @@ static u32 notrace dbx500_prcmu_sched_clock_read(void)
#endif
+#ifdef CONFIG_BOOTTIME
+static unsigned long __init boottime_get_time(void)
+{
+ return div_s64(clocksource_cyc2ns(clocksource_dbx500_prcmu.read(
+ &clocksource_dbx500_prcmu),
+ clocksource_dbx500_prcmu.mult,
+ clocksource_dbx500_prcmu.shift),
+ 1000);
+}
+
+static struct boottime_timer __initdata boottime_timer = {
+ .init = NULL,
+ .get_time = boottime_get_time,
+ .finalize = NULL,
+};
+#endif
+
void __init clksrc_dbx500_prcmu_init(void __iomem *base)
{
clksrc_dbx500_timer_base = base;
@@ -90,4 +110,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base)
32, RATE_32K);
#endif
clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
+#ifdef CONFIG_BOOTTIME
+ boottime_activate(&boottime_timer);
+#endif
}
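boottime_get_time() above derives microseconds since boot from the raw 32 kHz PRCMU counter: clocksource_cyc2ns() computes (cycles * mult) >> shift to get nanoseconds, and the div_s64(..., 1000) converts that to microseconds. A stand-alone sketch of the same arithmetic, using an illustrative mult/shift pair for a 32768 Hz clock (the kernel derives the real pair inside clocksource_register_hz(); the values here are assumptions):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* mult/shift chosen so (mult >> shift) == 10^9 / 32768 exactly:
    	 * with shift = 15, mult = 10^9 * 2^15 / 32768 = 10^9. */
    	const uint32_t mult = 1000000000u;
    	const uint32_t shift = 15;
    	uint64_t cyc = 32768;	/* one second worth of 32 kHz ticks */

    	uint64_t ns = (cyc * mult) >> shift;	/* clocksource_cyc2ns() */
    	uint64_t us = ns / 1000;		/* the div_s64(..., 1000) step */

    	printf("%llu ns = %llu us\n",
    	       (unsigned long long)ns, (unsigned long long)us);
    	return 0;	/* prints: 1000000000 ns = 1000000 us */
    }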
diff --git a/drivers/clocksource/db5500-mtimer.c b/drivers/clocksource/db5500-mtimer.c
new file mode 100644
index 00000000000..5e64da19e66
--- /dev/null
+++ b/drivers/clocksource/db5500-mtimer.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/clockchips.h>
+#include <linux/clksrc-db5500-mtimer.h>
+#include <linux/boottime.h>
+
+#include <asm/sched_clock.h>
+
+#define MTIMER_PRIMARY_COUNTER 0x18
+
+static void __iomem *db5500_mtimer_base;
+
+#ifdef CONFIG_CLKSRC_DB5500_MTIMER_SCHED_CLOCK
+static DEFINE_CLOCK_DATA(cd);
+
+unsigned long long notrace sched_clock(void)
+{
+ u32 cyc;
+
+ if (unlikely(!db5500_mtimer_base))
+ return 0;
+
+ cyc = readl_relaxed(db5500_mtimer_base + MTIMER_PRIMARY_COUNTER);
+
+ return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+}
+
+static void notrace db5500_mtimer_update_sched_clock(void)
+{
+ u32 cyc = readl_relaxed(db5500_mtimer_base + MTIMER_PRIMARY_COUNTER);
+ update_sched_clock(&cd, cyc, (u32)~0);
+}
+#endif
+
+#ifdef CONFIG_BOOTTIME
+static unsigned long __init boottime_get_time(void)
+{
+ return sched_clock();
+}
+
+static struct boottime_timer __initdata boottime_timer = {
+ .init = NULL,
+ .get_time = boottime_get_time,
+ .finalize = NULL,
+};
+#endif
+
+void __init db5500_mtimer_init(void __iomem *base)
+{
+ db5500_mtimer_base = base;
+
+ clocksource_mmio_init(base + MTIMER_PRIMARY_COUNTER, "mtimer", 32768,
+ 400, 32, clocksource_mmio_readl_up);
+
+#ifdef CONFIG_CLKSRC_DB5500_MTIMER_SCHED_CLOCK
+ init_sched_clock(&cd, db5500_mtimer_update_sched_clock,
+ 32, 32768);
+#endif
+ boottime_activate(&boottime_timer);
+}
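db5500_mtimer_init() registers the free-running MTIMER as a 32768 Hz, 32-bit clocksource (rating 400) and, when CONFIG_CLKSRC_DB5500_MTIMER_SCHED_CLOCK is set, also as the sched_clock source; the DEFINE_CLOCK_DATA/update_sched_clock machinery keeps sched_clock monotonic by resampling the counter before it wraps. A quick back-of-the-envelope check of the wrap period at this clock rate (stand-alone, illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t wrap_ticks = 1ULL << 32;	/* span of a 32-bit counter */
    	double wrap_s = (double)wrap_ticks / 32768.0;

    	/* ~131072 s, i.e. roughly 36.4 hours between wraparounds */
    	printf("wrap period: %.0f s (~%.1f h)\n", wrap_s, wrap_s / 3600.0);
    	return 0;
    }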
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9531fc2eda2..44aa7093235 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -39,7 +39,8 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB5500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7f2f149ae40..4de1fff790f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -376,6 +376,27 @@ show_one(scaling_cur_freq, cur);
static int __cpufreq_set_policy(struct cpufreq_policy *data,
struct cpufreq_policy *policy);
+int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max)
+{
+ int ret;
+ struct cpufreq_policy new_policy;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ ret = cpufreq_get_policy(&new_policy, cpu);
+ if (ret)
+ return -EINVAL;
+
+ new_policy.min = min;
+ new_policy.max = max;
+
+ ret = __cpufreq_set_policy(policy, &new_policy);
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_update_freq);
+
/**
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
*/
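cpufreq_update_freq() gives other kernel code (rather than sysfs writes) a way to clamp a CPU's policy to a [min, max] range, which ux500 platform code presumably pairs with dbx500_cpufreq_get_limits() added later in this patch. A hedged usage sketch; the caller below is made up for illustration, and frequencies are in kHz:

    /* Hypothetical thermal hook: restrict CPU 0 to 200-400 MHz. */
    static int example_thermal_clamp(void)
    {
    	return cpufreq_update_freq(0, 200000, 400000);
    }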
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 836e9b062e5..765256e4904 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -79,7 +79,6 @@ struct cpu_dbs_info_s {
cputime64_t prev_cpu_wall;
cputime64_t prev_cpu_nice;
struct cpufreq_policy *cur_policy;
- struct delayed_work work;
struct cpufreq_frequency_table *freq_table;
unsigned int freq_lo;
unsigned int freq_lo_jiffies;
@@ -95,8 +94,10 @@ struct cpu_dbs_info_s {
struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
+static DEFINE_PER_CPU(struct delayed_work, ondemand_work);
static unsigned int dbs_enable; /* number of CPUs using this policy */
+static ktime_t time_stamp;
/*
* dbs_mutex protects dbs_enable in governor start/stop.
@@ -290,22 +291,23 @@ static void update_sampling_rate(unsigned int new_rate)
mutex_lock(&dbs_info->timer_mutex);
- if (!delayed_work_pending(&dbs_info->work)) {
+ if (!delayed_work_pending(&per_cpu(ondemand_work, cpu))) {
mutex_unlock(&dbs_info->timer_mutex);
continue;
}
next_sampling = jiffies + usecs_to_jiffies(new_rate);
- appointed_at = dbs_info->work.timer.expires;
+ appointed_at = per_cpu(ondemand_work, cpu).timer.expires;
if (time_before(next_sampling, appointed_at)) {
mutex_unlock(&dbs_info->timer_mutex);
- cancel_delayed_work_sync(&dbs_info->work);
+ cancel_delayed_work_sync(&per_cpu(ondemand_work, cpu));
mutex_lock(&dbs_info->timer_mutex);
- schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
+ schedule_delayed_work_on(dbs_info->cpu,
+ &per_cpu(ondemand_work, cpu),
usecs_to_jiffies(new_rate));
}
@@ -449,6 +451,26 @@ static struct attribute_group dbs_attr_group = {
/************************** sysfs end ************************/
+static bool dbs_sw_coordinated_cpus(void)
+{
+ struct cpu_dbs_info_s *dbs_info;
+ struct cpufreq_policy *policy;
+ int i = 0;
+ int j;
+
+ dbs_info = &per_cpu(od_cpu_dbs_info, 0);
+ policy = dbs_info->cur_policy;
+
+ for_each_cpu(j, policy->cpus) {
+ i++;
+ }
+
+ if (i > 1)
+ return true; /* Dependent CPUs */
+ else
+ return false;
+}
+
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
if (dbs_tuners_ins.powersave_bias)
@@ -463,7 +485,6 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
unsigned int max_load_freq;
-
struct cpufreq_policy *policy;
unsigned int j;
@@ -598,20 +619,42 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
static void do_dbs_timer(struct work_struct *work)
{
- struct cpu_dbs_info_s *dbs_info =
- container_of(work, struct cpu_dbs_info_s, work.work);
- unsigned int cpu = dbs_info->cpu;
- int sample_type = dbs_info->sample_type;
-
+ struct cpu_dbs_info_s *dbs_info;
+ unsigned int cpu = smp_processor_id();
+ int sample_type;
int delay;
+ bool sample = true;
+
+ /* If SW dependent CPUs, use CPU 0 as leader */
+ if (dbs_sw_coordinated_cpus()) {
+
+ ktime_t time_now;
+ s64 delta_us;
- mutex_lock(&dbs_info->timer_mutex);
+ dbs_info = &per_cpu(od_cpu_dbs_info, 0);
+ mutex_lock(&dbs_info->timer_mutex);
+
+ time_now = ktime_get();
+ delta_us = ktime_us_delta(time_now, time_stamp);
+
+ /* Do nothing if we recently have sampled */
+ if (delta_us < (s64)(dbs_tuners_ins.sampling_rate / 2))
+ sample = false;
+ else
+ time_stamp = time_now;
+ } else {
+ dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+ mutex_lock(&dbs_info->timer_mutex);
+ }
+
+ sample_type = dbs_info->sample_type;
/* Common NORMAL_SAMPLE setup */
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
if (!dbs_tuners_ins.powersave_bias ||
sample_type == DBS_NORMAL_SAMPLE) {
- dbs_check_cpu(dbs_info);
+ if (sample)
+ dbs_check_cpu(dbs_info);
if (dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
dbs_info->sample_type = DBS_SUB_SAMPLE;
@@ -627,15 +670,17 @@ static void do_dbs_timer(struct work_struct *work)
delay -= jiffies % delay;
}
} else {
- __cpufreq_driver_target(dbs_info->cur_policy,
- dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ if (sample)
+ __cpufreq_driver_target(dbs_info->cur_policy,
+ dbs_info->freq_lo,
+ CPUFREQ_RELATION_H);
delay = dbs_info->freq_lo_jiffies;
}
- schedule_delayed_work_on(cpu, &dbs_info->work, delay);
+ schedule_delayed_work_on(cpu, &per_cpu(ondemand_work, cpu), delay);
mutex_unlock(&dbs_info->timer_mutex);
}
-static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info, int cpu)
{
/* We want all CPUs to do sampling nearly on same jiffy */
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
@@ -644,13 +689,18 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
delay -= jiffies % delay;
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
- INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
- schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
+ cancel_delayed_work_sync(&per_cpu(ondemand_work, cpu));
+ schedule_delayed_work_on(cpu, &per_cpu(ondemand_work, cpu), delay);
+}
+
+static inline void dbs_timer_exit(int cpu)
+{
+ cancel_delayed_work_sync(&per_cpu(ondemand_work, cpu));
}
-static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
+static void dbs_timer_exit_per_cpu(struct work_struct *dummy)
{
- cancel_delayed_work_sync(&dbs_info->work);
+ dbs_timer_exit(smp_processor_id());
}
/*
@@ -676,6 +726,42 @@ static int should_io_be_busy(void)
return 0;
}
+static int __cpuinit cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct device *cpu_dev;
+ struct cpu_dbs_info_s *dbs_info;
+
+ if (dbs_sw_coordinated_cpus())
+ dbs_info = &per_cpu(od_cpu_dbs_info, 0);
+ else
+ dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+
+ cpu_dev = get_cpu_device(cpu);
+ if (cpu_dev) {
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ dbs_timer_init(dbs_info, cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ dbs_timer_exit(cpu);
+ break;
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ dbs_timer_init(dbs_info, cpu);
+ break;
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata ondemand_cpu_notifier = {
+ .notifier_call = cpu_callback,
+};
+
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
@@ -704,9 +790,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (dbs_tuners_ins.ignore_nice)
j_dbs_info->prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ mutex_init(&j_dbs_info->timer_mutex);
+ INIT_DELAYED_WORK_DEFERRABLE(&per_cpu(ondemand_work, j),
+ do_dbs_timer);
+
+ j_dbs_info->rate_mult = 1;
}
this_dbs_info->cpu = cpu;
- this_dbs_info->rate_mult = 1;
ondemand_powersave_bias_init_cpu(cpu);
/*
* Start the timerschedule work, when this governor
@@ -736,21 +826,46 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
}
mutex_unlock(&dbs_mutex);
- mutex_init(&this_dbs_info->timer_mutex);
- dbs_timer_init(this_dbs_info);
+ /* If SW coordinated CPUs then register notifier */
+ if (dbs_sw_coordinated_cpus()) {
+ register_hotcpu_notifier(&ondemand_cpu_notifier);
+
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ j_dbs_info = &per_cpu(od_cpu_dbs_info, 0);
+ dbs_timer_init(j_dbs_info, j);
+ }
+
+ /* Initialize the timer time stamp */
+ time_stamp = ktime_get();
+
+
+ } else
+ dbs_timer_init(this_dbs_info, cpu);
break;
case CPUFREQ_GOV_STOP:
- dbs_timer_exit(this_dbs_info);
+
+ dbs_timer_exit(cpu);
mutex_lock(&dbs_mutex);
mutex_destroy(&this_dbs_info->timer_mutex);
dbs_enable--;
mutex_unlock(&dbs_mutex);
- if (!dbs_enable)
+ if (!dbs_enable) {
sysfs_remove_group(cpufreq_global_kobject,
&dbs_attr_group);
+ if (dbs_sw_coordinated_cpus()) {
+ /*
+ * Make sure all pending timers/works are
+ * stopped.
+ */
+ schedule_on_each_cpu(dbs_timer_exit_per_cpu);
+ unregister_hotcpu_notifier(&ondemand_cpu_notifier);
+ }
+ }
break;
case CPUFREQ_GOV_LIMITS:
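The net effect of the ondemand changes above: the delayed work moves out of cpu_dbs_info_s into a standalone per-CPU variable, every online CPU schedules its own timer (kept consistent across hotplug by ondemand_cpu_notifier), and on SW-coordinated systems CPU 0's cpu_dbs_info_s acts as leader, with the ktime stamp suppressing redundant samples taken within half a sampling period of each other. Note that dbs_sw_coordinated_cpus() detects the shared frequency domain by counting the CPUs in policy 0's cpumask; an equivalent formulation using the standard cpumask helper (illustrative only, not part of the patch):

    /* Equivalent to the counting loop in dbs_sw_coordinated_cpus():
     * more than one CPU in the policy's mask means the CPUs are
     * frequency-coordinated in software. */
    static bool sw_coordinated(const struct cpufreq_policy *policy)
    {
    	return cpumask_weight(policy->cpus) > 1;
    }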
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
deleted file mode 100644
index 0bf1b8910ee..00000000000
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics 2009
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License v2
- * Author: Sundar Iyer <sundar.iyer@stericsson.com>
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
- */
-#include <linux/kernel.h>
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/mfd/dbx500-prcmu.h>
-#include <mach/id.h>
-
-static struct cpufreq_frequency_table freq_table[] = {
- [0] = {
- .index = 0,
- .frequency = 200000,
- },
- [1] = {
- .index = 1,
- .frequency = 400000,
- },
- [2] = {
- .index = 2,
- .frequency = 800000,
- },
- [3] = {
- /* Used for MAX_OPP, if available */
- .index = 3,
- .frequency = CPUFREQ_TABLE_END,
- },
- [4] = {
- .index = 4,
- .frequency = CPUFREQ_TABLE_END,
- },
-};
-
-static enum arm_opp idx2opp[] = {
- ARM_EXTCLK,
- ARM_50_OPP,
- ARM_100_OPP,
- ARM_MAX_OPP
-};
-
-static struct freq_attr *db8500_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-static int db8500_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
-static int db8500_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_freqs freqs;
- unsigned int idx;
-
- /* scale the target frequency to one of the extremes supported */
- if (target_freq < policy->cpuinfo.min_freq)
- target_freq = policy->cpuinfo.min_freq;
- if (target_freq > policy->cpuinfo.max_freq)
- target_freq = policy->cpuinfo.max_freq;
-
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target
- (policy, freq_table, target_freq, relation, &idx)) {
- return -EINVAL;
- }
-
- freqs.old = policy->cur;
- freqs.new = freq_table[idx].frequency;
-
- if (freqs.old == freqs.new)
- return 0;
-
- /* pre-change notification */
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-
- /* request the PRCM unit for opp change */
- if (prcmu_set_arm_opp(idx2opp[idx])) {
- pr_err("db8500-cpufreq: Failed to set OPP level\n");
- return -EINVAL;
- }
-
- /* post change notification */
- for_each_cpu(freqs.cpu, policy->cpus)
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
- return 0;
-}
-
-static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
-{
- int i;
- /* request the prcm to get the current ARM opp */
- for (i = 0; prcmu_get_arm_opp() != idx2opp[i]; i++)
- ;
- return freq_table[i].frequency;
-}
-
-static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
-{
- int i, res;
-
- BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table));
-
- if (prcmu_has_arm_maxopp())
- freq_table[3].frequency = 1000000;
-
- pr_info("db8500-cpufreq : Available frequencies:\n");
- for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
- pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
-
- /* get policy fields based on the table */
- res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (!res)
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
- else {
- pr_err("db8500-cpufreq : Failed to read policy table\n");
- return res;
- }
-
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- policy->cur = db8500_cpufreq_getspeed(policy->cpu);
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
- /*
- * FIXME : Need to take time measurement across the target()
- * function with no/some/all drivers in the notification
- * list.
- */
- policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
-
- /* policy sharing between dual CPUs */
- cpumask_copy(policy->cpus, cpu_present_mask);
-
- policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-
- return 0;
-}
-
-static struct cpufreq_driver db8500_cpufreq_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = db8500_cpufreq_verify_speed,
- .target = db8500_cpufreq_target,
- .get = db8500_cpufreq_getspeed,
- .init = db8500_cpufreq_init,
- .name = "DB8500",
- .attr = db8500_cpufreq_attr,
-};
-
-static int __init db8500_cpufreq_register(void)
-{
- if (!cpu_is_u8500v20_or_later())
- return -ENODEV;
-
- pr_info("cpufreq for DB8500 started\n");
- return cpufreq_register_driver(&db8500_cpufreq_driver);
-}
-device_initcall(db8500_cpufreq_register);
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
new file mode 100644
index 00000000000..a6f991e2fb6
--- /dev/null
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com>
+ * Author: Martin Persson <martin.persson@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/id.h>
+
+static struct cpufreq_frequency_table db8500_freq_table[] = {
+ [0] = {
+ .index = 0,
+ .frequency = 200000,
+ },
+ [1] = {
+ .index = 1,
+ .frequency = 400000,
+ },
+ [2] = {
+ .index = 2,
+ .frequency = 800000,
+ },
+ [3] = {
+ /* Used for MAX_OPP, if available */
+ .index = 3,
+ .frequency = CPUFREQ_TABLE_END,
+ },
+ [4] = {
+ .index = 4,
+ .frequency = CPUFREQ_TABLE_END,
+ },
+};
+
+static struct cpufreq_frequency_table db5500_freq_table[] = {
+ [0] = {
+ .index = 0,
+ .frequency = 200000,
+ },
+ [1] = {
+ .index = 1,
+ .frequency = 396500,
+ },
+ [2] = {
+ .index = 2,
+ .frequency = 793000,
+ },
+ [3] = {
+ .index = 3,
+ .frequency = CPUFREQ_TABLE_END,
+ },
+};
+
+static struct cpufreq_frequency_table *freq_table;
+static int freq_table_len;
+
+static enum arm_opp db8500_idx2opp[] = {
+ ARM_EXTCLK,
+ ARM_50_OPP,
+ ARM_100_OPP,
+ ARM_MAX_OPP
+};
+
+static enum arm_opp db5500_idx2opp[] = {
+ ARM_EXTCLK,
+ ARM_50_OPP,
+ ARM_100_OPP,
+};
+
+static enum arm_opp *idx2opp;
+
+static struct freq_attr *dbx500_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ unsigned int idx;
+
+ /* scale the target frequency to one of the extremes supported */
+ if (target_freq < policy->cpuinfo.min_freq)
+ target_freq = policy->cpuinfo.min_freq;
+ if (target_freq > policy->cpuinfo.max_freq)
+ target_freq = policy->cpuinfo.max_freq;
+
+ /* Lookup the next frequency */
+ if (cpufreq_frequency_table_target
+ (policy, freq_table, target_freq, relation, &idx)) {
+ return -EINVAL;
+ }
+
+ freqs.old = policy->cur;
+ freqs.new = freq_table[idx].frequency;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ /* pre-change notification */
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ BUG_ON(idx >= freq_table_len);
+
+ /* request the PRCM unit for opp change */
+ if (prcmu_set_arm_opp(idx2opp[idx])) {
+ pr_err("ux500-cpufreq: Failed to set OPP level\n");
+ return -EINVAL;
+ }
+
+ /* post change notification */
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
+{
+ int i;
+ enum arm_opp current_opp;
+
+ current_opp = prcmu_get_arm_opp();
+
+ /* request the prcm to get the current ARM opp */
+ for (i = 0; i < freq_table_len; i++) {
+ if (current_opp == idx2opp[i])
+ return freq_table[i].frequency;
+ }
+
+ pr_err("cpufreq: ERROR: unknown opp %d given from prcmufw!\n",
+ current_opp);
+ BUG_ON(1);
+
+ /*
+ * Better to return something that might be correct than
+ * errno or zero, since clk_get_rate() won't do well with an errno.
+ */
+ return freq_table[0].frequency;
+}
+
+static void __init dbx500_cpufreq_init_maxopp_freq(void)
+{
+ struct prcmu_fw_version *fw_version = prcmu_get_fw_version();
+
+ if ((fw_version == NULL) || !prcmu_has_arm_maxopp())
+ return;
+
+ switch (fw_version->project) {
+ case PRCMU_FW_PROJECT_U8500:
+ case PRCMU_FW_PROJECT_U9500:
+ case PRCMU_FW_PROJECT_U8420:
+ freq_table[3].frequency = 1000000;
+ break;
+ case PRCMU_FW_PROJECT_U8500_C2:
+ case PRCMU_FW_PROJECT_U9500_C2:
+ case PRCMU_FW_PROJECT_U8520:
+ freq_table[3].frequency = 1150000;
+ break;
+ default:
+ break;
+ }
+}
+
+static bool initialized;
+
+static void __init dbx500_cpufreq_early_init(void)
+{
+ if (cpu_is_u5500()) {
+ freq_table = db5500_freq_table;
+ idx2opp = db5500_idx2opp;
+ freq_table_len = ARRAY_SIZE(db5500_freq_table);
+ } else if (cpu_is_u8500()) {
+ freq_table = db8500_freq_table;
+ idx2opp = db8500_idx2opp;
+ dbx500_cpufreq_init_maxopp_freq();
+ freq_table_len = ARRAY_SIZE(db8500_freq_table);
+ if (!prcmu_has_arm_maxopp())
+ freq_table_len--;
+ } else {
+ ux500_unknown_soc();
+ }
+ initialized = true;
+}
+
+/*
+ * This is called from localtimer initialization, via the clk_get_rate() for
+ * the smp_twd clock. This is way before cpufreq is initialized.
+ */
+unsigned long dbx500_cpufreq_getfreq(void)
+{
+ if (!initialized)
+ dbx500_cpufreq_early_init();
+
+ return dbx500_cpufreq_getspeed(0) * 1000;
+}
+
+int dbx500_cpufreq_percent2freq(int percent)
+{
+ int op;
+ int i;
+
+ switch (percent) {
+ case 0:
+ /* Fall through */
+ case 25:
+ op = ARM_EXTCLK;
+ break;
+ case 50:
+ op = ARM_50_OPP;
+ break;
+ case 100:
+ op = ARM_100_OPP;
+ break;
+ case 125:
+ if (cpu_is_u8500() && prcmu_has_arm_maxopp())
+ op = ARM_MAX_OPP;
+ else
+ op = ARM_100_OPP;
+ break;
+ default:
+ pr_err("cpufreq-dbx500: Incorrect arm target value (%d).\n",
+ percent);
+ return -EINVAL;
+ break;
+ }
+
+ for (i = 0; i < freq_table_len && idx2opp[i] != op; i++)
+ ;
+
+ if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
+ pr_err("cpufreq-dbx500: Matching frequency does not exist!\n");
+ return -EINVAL;
+ }
+
+ return freq_table[i].frequency;
+}
+
+int dbx500_cpufreq_get_limits(int cpu, int r,
+ unsigned int *min, unsigned int *max)
+{
+ int freq;
+ int ret;
+ static int old_freq;
+ struct cpufreq_policy p;
+
+ freq = dbx500_cpufreq_percent2freq(r);
+
+ if (freq < 0)
+ return -EINVAL;
+
+ if (freq != old_freq)
+ pr_debug("cpufreq-dbx500: set min arm freq to %d\n",
+ freq);
+
+ (*min) = freq;
+
+ ret = cpufreq_get_policy(&p, cpu);
+ if (ret) {
+ pr_err("cpufreq-dbx500: Failed to get policy.\n");
+ return -EINVAL;
+ }
+
+ (*max) = p.max;
+ return 0;
+}
+
+static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int res;
+
+ /* get policy fields based on the table */
+ res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (!res)
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+ else {
+ pr_err("dbx500-cpufreq : Failed to read policy table\n");
+ return res;
+ }
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+ policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+
+ /*
+ * FIXME : Need to take time measurement across the target()
+ * function with no/some/all drivers in the notification
+ * list.
+ */
+ policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
+
+ /* policy sharing between dual CPUs */
+ cpumask_copy(policy->cpus, cpu_present_mask);
+
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+
+ return 0;
+}
+
+static struct cpufreq_driver dbx500_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = dbx500_cpufreq_verify_speed,
+ .target = dbx500_cpufreq_target,
+ .get = dbx500_cpufreq_getspeed,
+ .init = dbx500_cpufreq_init,
+ .name = "DBX500",
+ .attr = dbx500_cpufreq_attr,
+};
+
+static int __init dbx500_cpufreq_register(void)
+{
+ int i;
+
+ if (!initialized)
+ dbx500_cpufreq_early_init();
+
+ pr_info("dbx500-cpufreq : Available frequencies:\n");
+
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+ pr_info(" %d MHz\n", freq_table[i].frequency / 1000);
+
+ return cpufreq_register_driver(&dbx500_cpufreq_driver);
+}
+device_initcall(dbx500_cpufreq_register);
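dbx500_cpufreq_percent2freq() maps the percent-style OPP requests used elsewhere in the ux500 tree onto table frequencies. With the tables defined above, the DB8500 mapping works out as in this illustrative usage (the MAX_OPP figure depends on the PRCMU firmware version; the wrapper function is made up for the example):

    /* Illustrative results on DB8500, from db8500_freq_table above:
     *   0 or 25 -> ARM_EXTCLK  ->  200000 kHz
     *   50      -> ARM_50_OPP  ->  400000 kHz
     *   100     -> ARM_100_OPP ->  800000 kHz
     *   125     -> ARM_MAX_OPP -> 1000000 or 1150000 kHz (firmware dependent)
     */
    static int example_request_half_speed(void)
    {
    	return dbx500_cpufreq_percent2freq(50);	/* 400000 on DB8500 */
    }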
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index dd414d9350e..638648816c9 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -296,4 +296,15 @@ config CRYPTO_DEV_TEGRA_AES
To compile this driver as a module, choose M here: the module
will be called tegra-aes.
+config CRYPTO_DEV_UX500
+ tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration"
+ depends on ARCH_U8500
+ select CRYPTO_ALGAPI
+ help
+ Driver for ST-Ericsson UX500 crypto engine.
+
+if CRYPTO_DEV_UX500
+ source "drivers/crypto/ux500/Kconfig"
+endif # if CRYPTO_DEV_UX500
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index f3e64eadd7a..8737ed1bdfe 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
+obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
new file mode 100644
index 00000000000..165a03d46c0
--- /dev/null
+++ b/drivers/crypto/ux500/Kconfig
@@ -0,0 +1,29 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+config CRYPTO_DEV_UX500_CRYP
+ tristate "UX500 crypto driver for CRYP block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_DES
+ help
+ This is the driver for the crypto block CRYP.
+
+config CRYPTO_DEV_UX500_HASH
+ tristate "UX500 crypto driver for HASH block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_HASH
+ select CRYPTO_HMAC
+ help
+ This selects the UX500 hash driver for the HASH hardware.
+ Depends on U8500/STM DMA if running in DMA mode.
+
+config CRYPTO_DEV_UX500_DEBUG
+ bool "Activate ux500 platform debug-mode for crypto and hash block"
+ depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH
+ default n
+ help
+ Say Y if you want to add debug prints to ux500_hash and
+ ux500_cryp devices.
diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile
new file mode 100644
index 00000000000..b9a365bade8
--- /dev/null
+++ b/drivers/crypto/ux500/Makefile
@@ -0,0 +1,8 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
new file mode 100644
index 00000000000..e5d362a6f68
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -0,0 +1,13 @@
+#/*
+# * Copyright (C) ST-Ericsson SA 2010
+# * Author: shujuan.chen@stericsson.com for ST-Ericsson.
+# * License terms: GNU General Public License (GPL) version 2 */
+
+ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
+CFLAGS_cryp_core.o := -DDEBUG -O0
+CFLAGS_cryp.o := -DDEBUG -O0
+CFLAGS_cryp_irq.o := -DDEBUG -O0
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
+ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
new file mode 100644
index 00000000000..cec92af2f73
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -0,0 +1,418 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <mach/hardware.h>
+
+#include "cryp_p.h"
+#include "cryp.h"
+
+/**
+ * cryp_wait_until_done - wait until the device logic is not busy
+ * @device_data: Pointer to the device data struct for base address.
+ */
+void cryp_wait_until_done(struct cryp_device_data *device_data)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+}
+
+/**
+ * cryp_check - This routine checks Peripheral and PCell Id
+ * @device_data: Pointer to the device data struct for base address.
+ */
+int cryp_check(struct cryp_device_data *device_data)
+{
+ int peripheralID2 = 0;
+
+ if (NULL == device_data)
+ return -EINVAL;
+
+ if (cpu_is_u8500() || cpu_is_u9540())
+ peripheralID2 = CRYP_PERIPHERAL_ID2_DB8500;
+ else if (cpu_is_u5500())
+ peripheralID2 = CRYP_PERIPHERAL_ID2_DB5500;
+
+ /* Check Peripheral and Pcell Id Register for CRYP */
+ if ((CRYP_PERIPHERAL_ID0 ==
+ readl_relaxed(&device_data->base->periphId0))
+ && (CRYP_PERIPHERAL_ID1 ==
+ readl_relaxed(&device_data->base->periphId1))
+ && (peripheralID2 ==
+ readl_relaxed(&device_data->base->periphId2))
+ && (CRYP_PERIPHERAL_ID3 ==
+ readl_relaxed(&device_data->base->periphId3))
+ && (CRYP_PCELL_ID0 ==
+ readl_relaxed(&device_data->base->pcellId0))
+ && (CRYP_PCELL_ID1 ==
+ readl_relaxed(&device_data->base->pcellId1))
+ && (CRYP_PCELL_ID2 ==
+ readl_relaxed(&device_data->base->pcellId2))
+ && (CRYP_PCELL_ID3 ==
+ readl_relaxed(&device_data->base->pcellId3))) {
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+/**
+ * cryp_activity - This routine enables/disables the cryptography function.
+ * @device_data: Pointer to the device data struct for base address.
+ * @cryp_crypen: Enable/Disable functionality
+ */
+void cryp_activity(struct cryp_device_data *device_data,
+ enum cryp_crypen cryp_crypen)
+{
+ CRYP_PUT_BITS(&device_data->base->cr,
+ cryp_crypen,
+ CRYP_CR_CRYPEN_POS,
+ CRYP_CR_CRYPEN_MASK);
+}
+
+/**
+ * cryp_flush_inoutfifo - Resets both the input and the output FIFOs
+ * @device_data: Pointer to the device data struct for base address.
+ */
+void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
+{
+ /*
+ * We always need to disable the hardware before trying to flush the
+ * FIFO. This is something that isn't written in the design
+ * specification, but we have been informed by the hardware designers
+ * that this must be done.
+ */
+ cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
+ cryp_wait_until_done(device_data);
+
+ CRYP_SET_BITS(&device_data->base->cr, CRYP_CR_FFLUSH_MASK);
+ /*
+ * CRYP_SR_INFIFO_READY_MASK is the expected value on the status
+ * register when starting a new calculation, which means Input FIFO is
+ * not full and input FIFO is empty.
+ */
+ while (readl_relaxed(&device_data->base->sr) !=
+ CRYP_SR_INFIFO_READY_MASK)
+ cpu_relax();
+}
+
+/**
+ * cryp_set_configuration - This routine sets the control register of the CRYP IP
+ * @device_data: Pointer to the device data struct for base address.
+ * @cryp_config: Pointer to the configuration parameter
+ * @control_register: The control register to be written later on.
+ */
+int cryp_set_configuration(struct cryp_device_data *device_data,
+ struct cryp_config *cryp_config,
+ u32 *control_register)
+{
+ u32 cr_for_kse;
+
+ if (NULL == device_data || NULL == cryp_config)
+ return -EINVAL;
+
+ *control_register |= (cryp_config->keysize << CRYP_CR_KEYSIZE_POS);
+
+ /* Prepare key for decryption in AES_ECB and AES_CBC mode. */
+ if ((CRYP_ALGORITHM_DECRYPT == cryp_config->algodir) &&
+ ((CRYP_ALGO_AES_ECB == cryp_config->algomode) ||
+ (CRYP_ALGO_AES_CBC == cryp_config->algomode))) {
+ cr_for_kse = *control_register;
+ /*
+ * This seems a bit odd, but it is indeed needed to set this to
+ * encrypt even though it is a decryption that we are doing. It
+ * is also mentioned in the design spec that you need to do this.
+ * After the key preparation for decryption is done you should set
+ * algodir back to decryption, which is done outside this if
+ * statement.
+ *
+ * According to the design specification we should set mode ECB
+ * during key preparation even though we might be running CBC
+ * when entering this function.
+ *
+ * Writing to KSE_ENABLED will drop CRYPEN when key preparation
+ * is done. Therefore we need to set CRYPEN again outside this
+ * if statement when running decryption.
+ */
+ cr_for_kse |= ((CRYP_ALGORITHM_ENCRYPT << CRYP_CR_ALGODIR_POS) |
+ (CRYP_ALGO_AES_ECB << CRYP_CR_ALGOMODE_POS) |
+ (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS) |
+ (KSE_ENABLED << CRYP_CR_KSE_POS));
+
+ writel_relaxed(cr_for_kse, &device_data->base->cr);
+ cryp_wait_until_done(device_data);
+ }
+
+ *control_register |=
+ ((cryp_config->algomode << CRYP_CR_ALGOMODE_POS) |
+ (cryp_config->algodir << CRYP_CR_ALGODIR_POS));
+
+ return 0;
+}
+
+/**
+ * cryp_configure_protection - set the protection bits in the CRYP logic.
+ * @device_data: Pointer to the device data struct for base address.
+ * @p_protect_config: Pointer to the protection mode and
+ * secure mode configuration
+ */
+int cryp_configure_protection(struct cryp_device_data *device_data,
+ struct cryp_protection_config *p_protect_config)
+{
+ if (NULL == p_protect_config)
+ return -EINVAL;
+
+ CRYP_WRITE_BIT(&device_data->base->cr,
+ (u32) p_protect_config->secure_access,
+ CRYP_CR_SECURE_MASK);
+ CRYP_PUT_BITS(&device_data->base->cr,
+ p_protect_config->privilege_access,
+ CRYP_CR_PRLG_POS,
+ CRYP_CR_PRLG_MASK);
+
+ return 0;
+}
+
+/**
+ * cryp_is_logic_busy - returns the busy status of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ */
+int cryp_is_logic_busy(struct cryp_device_data *device_data)
+{
+ return CRYP_TEST_BITS(&device_data->base->sr,
+ CRYP_SR_BUSY_MASK);
+}
+
+/**
+ * cryp_configure_for_dma - configures the CRYP IP for DMA operation
+ * @device_data: Pointer to the device data struct for base address.
+ * @dma_req: Specifies the DMA request type value.
+ */
+void cryp_configure_for_dma(struct cryp_device_data *device_data,
+ enum cryp_dma_req_type dma_req)
+{
+ CRYP_SET_BITS(&device_data->base->dmacr,
+ (u32) dma_req);
+}
+
+/**
+ * cryp_configure_key_values - configures the key values for CRYP operations
+ * @device_data: Pointer to the device data struct for base address.
+ * @key_reg_index: Key value index register
+ * @key_value: The key value struct
+ */
+int cryp_configure_key_values(struct cryp_device_data *device_data,
+ enum cryp_key_reg_index key_reg_index,
+ struct cryp_key_value key_value)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+
+ switch (key_reg_index) {
+ case CRYP_KEY_REG_1:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_1_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_1_r);
+ break;
+ case CRYP_KEY_REG_2:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_2_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_2_r);
+ break;
+ case CRYP_KEY_REG_3:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_3_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_3_r);
+ break;
+ case CRYP_KEY_REG_4:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_4_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_4_r);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * cryp_configure_init_vector - configures the initialization vector register
+ * @device_data: Pointer to the device data struct for base address.
+ * @init_vector_index: Specifies the index of the init vector.
+ * @init_vector_value: Specifies the value for the init vector.
+ */
+int cryp_configure_init_vector(struct cryp_device_data *device_data,
+ enum cryp_init_vector_index
+ init_vector_index,
+ struct cryp_init_vector_value
+ init_vector_value)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+
+ switch (init_vector_index) {
+ case CRYP_INIT_VECTOR_INDEX_0:
+ writel_relaxed(init_vector_value.init_value_left,
+ &device_data->base->init_vect_0_l);
+ writel_relaxed(init_vector_value.init_value_right,
+ &device_data->base->init_vect_0_r);
+ break;
+ case CRYP_INIT_VECTOR_INDEX_1:
+ writel_relaxed(init_vector_value.init_value_left,
+ &device_data->base->init_vect_1_l);
+ writel_relaxed(init_vector_value.init_value_right,
+ &device_data->base->init_vect_1_r);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * cryp_save_device_context - Store hardware registers and
+ * other device context parameters
+ * @device_data: Pointer to the device data struct for base address.
+ * @ctx: Crypto device context
+ * @cryp_mode: Mode: Polling, Interrupt or DMA
+ */
+void cryp_save_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx,
+ int cryp_mode)
+{
+ enum cryp_algo_mode algomode;
+ struct cryp_register *src_reg = device_data->base;
+ struct cryp_config *config =
+ (struct cryp_config *)device_data->current_ctx;
+
+ /*
+ * Always start by disabling the hardware and wait for it to finish the
+ * ongoing calculations before trying to reprogram it.
+ */
+ cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
+ cryp_wait_until_done(device_data);
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH);
+
+ if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0)
+ ctx->din = readl_relaxed(&src_reg->din);
+
+ ctx->cr = readl_relaxed(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK;
+
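+ /*
+ * Fall through for all items in switch statement. DES is captured in
+ * the default.
+ */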
+ switch (config->keysize) {
+ case CRYP_KEY_SIZE_256:
+ ctx->key_4_l = readl_relaxed(&src_reg->key_4_l);
+ ctx->key_4_r = readl_relaxed(&src_reg->key_4_r);
+
+ case CRYP_KEY_SIZE_192:
+ ctx->key_3_l = readl_relaxed(&src_reg->key_3_l);
+ ctx->key_3_r = readl_relaxed(&src_reg->key_3_r);
+
+ case CRYP_KEY_SIZE_128:
+ ctx->key_2_l = readl_relaxed(&src_reg->key_2_l);
+ ctx->key_2_r = readl_relaxed(&src_reg->key_2_r);
+
+ default:
+ ctx->key_1_l = readl_relaxed(&src_reg->key_1_l);
+ ctx->key_1_r = readl_relaxed(&src_reg->key_1_r);
+ }
+
+ /* Save IV for CBC mode for both AES and DES. */
+ algomode = ((ctx->cr & CRYP_CR_ALGOMODE_MASK) >> CRYP_CR_ALGOMODE_POS);
+ if (algomode == CRYP_ALGO_TDES_CBC ||
+ algomode == CRYP_ALGO_DES_CBC ||
+ algomode == CRYP_ALGO_AES_CBC) {
+ ctx->init_vect_0_l = readl_relaxed(&src_reg->init_vect_0_l);
+ ctx->init_vect_0_r = readl_relaxed(&src_reg->init_vect_0_r);
+ ctx->init_vect_1_l = readl_relaxed(&src_reg->init_vect_1_l);
+ ctx->init_vect_1_r = readl_relaxed(&src_reg->init_vect_1_r);
+ }
+}
+
+/**
+ * cryp_restore_device_context - Restore hardware registers and
+ * other device context parameter
+ * @device_data: Pointer to the device data struct for base address.
+ * @ctx: Crypto device context
+ */
+void cryp_restore_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx)
+{
+ struct cryp_register *reg = device_data->base;
+ struct cryp_config *config =
+ (struct cryp_config *)device_data->current_ctx;
+
+ /*
+ * Fall through for all items in switch statement. DES is captured in
+ * the default.
+ */
+ switch (config->keysize) {
+ case CRYP_KEY_SIZE_256:
+ writel_relaxed(ctx->key_4_l, &reg->key_4_l);
+ writel_relaxed(ctx->key_4_r, &reg->key_4_r);
+
+ case CRYP_KEY_SIZE_192:
+ writel_relaxed(ctx->key_3_l, &reg->key_3_l);
+ writel_relaxed(ctx->key_3_r, &reg->key_3_r);
+
+ case CRYP_KEY_SIZE_128:
+ writel_relaxed(ctx->key_2_l, &reg->key_2_l);
+ writel_relaxed(ctx->key_2_r, &reg->key_2_r);
+
+ default:
+ writel_relaxed(ctx->key_1_l, &reg->key_1_l);
+ writel_relaxed(ctx->key_1_r, &reg->key_1_r);
+ }
+
+ /* Restore IV for CBC mode for AES and DES. */
+ if (config->algomode == CRYP_ALGO_TDES_CBC ||
+ config->algomode == CRYP_ALGO_DES_CBC ||
+ config->algomode == CRYP_ALGO_AES_CBC) {
+ writel_relaxed(ctx->init_vect_0_l, &reg->init_vect_0_l);
+ writel_relaxed(ctx->init_vect_0_r, &reg->init_vect_0_r);
+ writel_relaxed(ctx->init_vect_1_l, &reg->init_vect_1_l);
+ writel_relaxed(ctx->init_vect_1_r, &reg->init_vect_1_r);
+ }
+}
+
+/**
+ * cryp_write_indata - This routine writes 32-bit data into the data input
+ * register of the cryptography IP.
+ * @device_data: Pointer to the device data struct for base address.
+ * @write_data: Data word to write
+ */
+int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data)
+{
+ writel_relaxed(write_data, &device_data->base->din);
+
+ return 0;
+}
+
+/**
+ * cryp_read_outdata - This routine reads the data from the data output
+ * register of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ * @read_data: Read the data from the output FIFO.
+ */
+int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data)
+{
+ *read_data = readl_relaxed(&device_data->base->dout);
+
+ return 0;
+}
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
new file mode 100644
index 00000000000..df2e25d4671
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_H_
+#define _CRYP_H_
+
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/klist.h>
+#include <linux/mutex.h>
+
+#define DEV_DBG_NAME "crypX crypX:"
+
+/* CRYP enable/disable */
+enum cryp_crypen {
+ CRYP_CRYPEN_DISABLE = 0,
+ CRYP_CRYPEN_ENABLE = 1
+};
+
+/* CRYP Start Computation enable/disable */
+enum cryp_start {
+ CRYP_START_DISABLE = 0,
+ CRYP_START_ENABLE = 1
+};
+
+/* CRYP Init Signal enable/disable */
+enum cryp_init {
+ CRYP_INIT_DISABLE = 0,
+ CRYP_INIT_ENABLE = 1
+};
+
+/* Cryp State enable/disable */
+enum cryp_state {
+ CRYP_STATE_DISABLE = 0,
+ CRYP_STATE_ENABLE = 1
+};
+
+/* Key preparation bit enable */
+enum cryp_key_prep {
+ KSE_DISABLED = 0,
+ KSE_ENABLED = 1
+};
+
+/* Key size for AES */
+#define CRYP_KEY_SIZE_128 (0)
+#define CRYP_KEY_SIZE_192 (1)
+#define CRYP_KEY_SIZE_256 (2)
+
+/* AES modes */
+enum cryp_algo_mode {
+ CRYP_ALGO_TDES_ECB,
+ CRYP_ALGO_TDES_CBC,
+ CRYP_ALGO_DES_ECB,
+ CRYP_ALGO_DES_CBC,
+ CRYP_ALGO_AES_ECB,
+ CRYP_ALGO_AES_CBC,
+ CRYP_ALGO_AES_CTR,
+ CRYP_ALGO_AES_XTS
+};
+
+/* Cryp Encryption or Decryption */
+enum cryp_algorithm_dir {
+ CRYP_ALGORITHM_ENCRYPT,
+ CRYP_ALGORITHM_DECRYPT
+};
+
+/* Hardware access method */
+enum cryp_mode {
+ CRYP_MODE_POLLING,
+ CRYP_MODE_INTERRUPT,
+ CRYP_MODE_DMA
+};
+
+/**
+ * struct cryp_config -
+ * @keysize: Key size for AES
+ * @algomode: AES modes
+ * @algodir: Cryp Encryption or Decryption
+ *
+ * CRYP configuration structure to be passed to cryp_set_configuration().
+ */
+struct cryp_config {
+ int keysize;
+ enum cryp_algo_mode algomode;
+ enum cryp_algorithm_dir algodir;
+};
+
+/**
+ * struct cryp_protection_config -
+ * @privilege_access: Privileged cryp state enable/disable
+ * @secure_access: Secure cryp state enable/disable
+ *
+ * Protection configuration structure for setting privilege access
+ */
+struct cryp_protection_config {
+ enum cryp_state privilege_access;
+ enum cryp_state secure_access;
+};
+
+/* Cryp status */
+enum cryp_status_id {
+ CRYP_STATUS_BUSY = 0x10,
+ CRYP_STATUS_OUTPUT_FIFO_FULL = 0x08,
+ CRYP_STATUS_OUTPUT_FIFO_NOT_EMPTY = 0x04,
+ CRYP_STATUS_INPUT_FIFO_NOT_FULL = 0x02,
+ CRYP_STATUS_INPUT_FIFO_EMPTY = 0x01
+};
+
+/* Cryp DMA interface */
+enum cryp_dma_req_type {
+ CRYP_DMA_DISABLE_BOTH,
+ CRYP_DMA_ENABLE_IN_DATA,
+ CRYP_DMA_ENABLE_OUT_DATA,
+ CRYP_DMA_ENABLE_BOTH_DIRECTIONS
+};
+
+enum cryp_dma_channel {
+ CRYP_DMA_RX = 0,
+ CRYP_DMA_TX
+};
+
+/* Key registers */
+enum cryp_key_reg_index {
+ CRYP_KEY_REG_1,
+ CRYP_KEY_REG_2,
+ CRYP_KEY_REG_3,
+ CRYP_KEY_REG_4
+};
+
+/* Key register left and right */
+struct cryp_key_value {
+ u32 key_value_left;
+ u32 key_value_right;
+};
+
+/* Cryp Initialization structure */
+enum cryp_init_vector_index {
+ CRYP_INIT_VECTOR_INDEX_0,
+ CRYP_INIT_VECTOR_INDEX_1
+};
+
+/**
+ * struct cryp_init_vector_value -
+ * @init_value_left: Left word of the initialization vector.
+ * @init_value_right: Right word of the initialization vector.
+ */
+struct cryp_init_vector_value {
+ u32 init_value_left;
+ u32 init_value_right;
+};
+
+/**
+ * struct cryp_device_context - structure for a cryp context.
+ * @cr: control register
+ * @dmacr: DMA control register
+ * @imsc: Interrupt mask set/clear register
+ * @key_1_l: Key 1l register
+ * @key_1_r: Key 1r register
+ * @key_2_l: Key 2l register
+ * @key_2_r: Key 2r register
+ * @key_3_l: Key 3l register
+ * @key_3_r: Key 3r register
+ * @key_4_l: Key 4l register
+ * @key_4_r: Key 4r register
+ * @init_vect_0_l: Initialization vector 0l register
+ * @init_vect_0_r: Initialization vector 0r register
+ * @init_vect_1_l: Initialization vector 1l register
+ * @init_vect_1_r: Initialization vector 1r register
+ * @din: Data in register
+ * @dout: Data out register
+ *
+ * CRYP power management specific structure.
+ */
+struct cryp_device_context {
+ u32 cr;
+ u32 dmacr;
+ u32 imsc;
+
+ u32 key_1_l;
+ u32 key_1_r;
+ u32 key_2_l;
+ u32 key_2_r;
+ u32 key_3_l;
+ u32 key_3_r;
+ u32 key_4_l;
+ u32 key_4_r;
+
+ u32 init_vect_0_l;
+ u32 init_vect_0_r;
+ u32 init_vect_1_l;
+ u32 init_vect_1_r;
+
+ u32 din;
+ u32 dout;
+};
+
+struct cryp_dma {
+ dma_cap_mask_t mask;
+ struct completion cryp_dma_complete;
+ struct dma_chan *chan_cryp2mem;
+ struct dma_chan *chan_mem2cryp;
+ struct stedma40_chan_cfg *cfg_cryp2mem;
+ struct stedma40_chan_cfg *cfg_mem2cryp;
+ int sg_src_len;
+ int sg_dst_len;
+ struct scatterlist *sg_src;
+ struct scatterlist *sg_dst;
+ int nents_src;
+ int nents_dst;
+};
+
+/**
+ * struct cryp_device_data - structure for a cryp device.
+ * @base: Pointer to the hardware base address.
+ * @dev: Pointer to the device's dev structure.
+ * @clk: Pointer to the device's clock control.
+ * @pwr_regulator: Pointer to the device's power control.
+ * @power_status: Current status of the power.
+ * @ctx_lock: Lock for current_ctx.
+ * @current_ctx: Pointer to the currently allocated context.
+ * @list_node: For inclusion into a klist.
+ * @dma: The dma structure holding channel configuration.
+ * @power_state: TRUE = power state on, FALSE = power state off.
+ * @power_state_spinlock: Spinlock for power_state.
+ * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx.
+ */
+struct cryp_device_data {
+ struct cryp_register __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+ struct ux500_regulator *pwr_regulator;
+ int power_status;
+ spinlock_t ctx_lock;
+ struct cryp_ctx *current_ctx;
+ struct klist_node list_node;
+ struct cryp_dma dma;
+ bool power_state;
+ spinlock_t power_state_spinlock;
+ bool restore_dev_ctx;
+};
+
+void cryp_wait_until_done(struct cryp_device_data *device_data);
+
+/* Initialization functions */
+
+int cryp_check(struct cryp_device_data *device_data);
+
+void cryp_activity(struct cryp_device_data *device_data,
+ enum cryp_crypen cryp_crypen);
+
+void cryp_flush_inoutfifo(struct cryp_device_data *device_data);
+
+int cryp_set_configuration(struct cryp_device_data *device_data,
+ struct cryp_config *cryp_config,
+ u32 *control_register);
+
+void cryp_configure_for_dma(struct cryp_device_data *device_data,
+ enum cryp_dma_req_type dma_req);
+
+int cryp_configure_key_values(struct cryp_device_data *device_data,
+ enum cryp_key_reg_index key_reg_index,
+ struct cryp_key_value key_value);
+
+int cryp_configure_init_vector(struct cryp_device_data *device_data,
+ enum cryp_init_vector_index
+ init_vector_index,
+ struct cryp_init_vector_value
+ init_vector_value);
+
+int cryp_configure_protection(struct cryp_device_data *device_data,
+ struct cryp_protection_config *p_protect_config);
+
+/* Power management functions */
+void cryp_save_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx,
+ int cryp_mode);
+
+void cryp_restore_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx);
+
+/* Data transfer and status bits. */
+int cryp_is_logic_busy(struct cryp_device_data *device_data);
+
+int cryp_get_status(struct cryp_device_data *device_data);
+
+/**
+ * cryp_write_indata - This routine writes 32-bit data into the data input
+ * register of the cryptography IP.
+ * @device_data: Pointer to the device data struct for base address.
+ * @write_data: Data to write.
+ */
+int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data);
+
+/**
+ * cryp_read_outdata - This routine reads the data from the data output
+ * register of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ * @read_data: Read the data from the output FIFO.
+ */
+int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data);
+
+#endif /* _CRYP_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
new file mode 100644
index 00000000000..5893abb57dc
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -0,0 +1,2313 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/crypto.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/klist.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/dbx500-prcmu.h>
+#include <linux/semaphore.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+
+#include <plat/ste_dma40.h>
+
+#include <mach/crypto-ux500.h>
+#include <mach/hardware.h>
+
+#include "cryp_p.h"
+#include "cryp.h"
+
+#define CRYP_MAX_KEY_SIZE 32
+#define BYTES_PER_WORD 4
+
+static int cryp_mode;
+static atomic_t session_id;
+
+static struct stedma40_chan_cfg *mem_to_engine;
+static struct stedma40_chan_cfg *engine_to_mem;
+
+/**
+ * struct cryp_driver_data - data specific to the driver.
+ *
+ * @device_list: A list of registered devices to choose from.
+ * @device_allocation: A semaphore initialized with the number of devices.
+ */
+struct cryp_driver_data {
+ struct klist device_list;
+ struct semaphore device_allocation;
+};
+
+/**
+ * struct cryp_ctx - Crypto context
+ * @config: Crypto mode.
+ * @key: Key buffer (up to CRYP_MAX_KEY_SIZE bytes).
+ * @keylen: Length of key.
+ * @iv: Pointer to initialization vector.
+ * @indata: Pointer to indata.
+ * @outdata: Pointer to outdata.
+ * @datalen: Length of indata.
+ * @outlen: Length of outdata.
+ * @blocksize: Size of blocks.
+ * @updated: Updated flag.
+ * @dev_ctx: Device dependent context.
+ * @device: Pointer to the device.
+ * @session_id: Session id, used to detect when another context has used
+ * the hardware since this context last ran.
+ */
+struct cryp_ctx {
+ struct cryp_config config;
+ u8 key[CRYP_MAX_KEY_SIZE];
+ u32 keylen;
+ u8 *iv;
+ const u8 *indata;
+ u8 *outdata;
+ u32 datalen;
+ u32 outlen;
+ u32 blocksize;
+ u8 updated;
+ struct cryp_device_context dev_ctx;
+ struct cryp_device_data *device;
+ u32 session_id;
+};
+
+static struct cryp_driver_data driver_data;
+
+/**
+ * uint8p_to_uint32_be - 4*uint8 to uint32 big endian
+ * @in: Data to convert.
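+ *
+ * For example, the bytes {0x12, 0x34, 0x56, 0x78} become 0x12345678.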
+ */
+static inline u32 uint8p_to_uint32_be(u8 *in)
+{
+ return (u32)in[0]<<24 |
+ ((u32)in[1]<<16) |
+ ((u32)in[2]<<8) |
+ ((u32)in[3]);
+}
+
+/**
+ * swap_bits_in_byte - mirror the bits in a byte
+ * @b: the byte to be mirrored
+ *
+ * The bits are swapped the following way:
+ * Byte b includes bits 0-7, nibble 1 (n1) includes bits 0-3 and
+ * nibble 2 (n2) includes bits 4-7.
+ *
+ * Nibble 1 (n1):
+ * (The "old" (moved) bit is replaced with a zero)
+ * 1. Move bit 6 and 7, 4 positions to the right.
+ * 2. Move bit 3 and 5, 2 positions to the right.
+ * 3. Move bit 1-4, 1 position to the right.
+ *
+ * Nibble 2 (n2):
+ * 1. Move bit 0 and 1, 4 positions to the left.
+ * 2. Move bit 2 and 4, 2 positions to the left.
+ * 3. Move bit 3-6, 1 position to the left.
+ *
+ * Combine the two nibbles to a complete and swapped byte.
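+ *
+ * For example, 0xb2 (1011 0010) is mirrored to 0x4d (0100 1101).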
+ */
+static inline u8 swap_bits_in_byte(u8 b)
+{
+#define R_SHIFT_4_MASK (0xc0) /* Bits 6 and 7, right shift 4 */
+#define R_SHIFT_2_MASK (0x28) /* (After right shift 4) Bits 3 and 5,
+ right shift 2 */
+#define R_SHIFT_1_MASK (0x1e) /* (After right shift 2) Bits 1-4,
+ right shift 1 */
+#define L_SHIFT_4_MASK (0x03) /* Bits 0 and 1, left shift 4 */
+#define L_SHIFT_2_MASK (0x14) /* (After left shift 4) Bits 2 and 4,
+ left shift 2 */
+#define L_SHIFT_1_MASK (0x78) /* (After left shift 2) Bits 3-6,
+ left shift 1 */
+
+ u8 n1;
+ u8 n2;
+
+ /* Swap most significant nibble */
+ /* Right shift 4, bits 6 and 7 */
+ n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4));
+ /* Right shift 2, bits 3 and 5 */
+ n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
+ /* Right shift 1, bits 1-4 */
+ n1 = (n1 & R_SHIFT_1_MASK) >> 1;
+
+ /* Swap least significant nibble */
+ /* Left shift 4, bits 0 and 1 */
+ n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4));
+ /* Left shift 2, bits 2 and 4 */
+ n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
+ /* Left shift 1, bits 3-6 */
+ n2 = (n2 & L_SHIFT_1_MASK) << 1;
+
+ return n1 | n2;
+}
+
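+/**
+ * swap_words_in_key_and_bits_in_byte - reorder a key buffer for the hardware
+ * @in: key as supplied by the crypto API
+ * @out: buffer receiving the transformed key
+ * @len: key length in bytes
+ *
+ * Reverses the order of the 32-bit words in the key and mirrors the bits
+ * of each byte, which is the layout used when loading the AES key
+ * registers (see cfg_keys()).
+ */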
+static inline void swap_words_in_key_and_bits_in_byte(const u8 *in,
+ u8 *out, u32 len)
+{
+ unsigned int i = 0;
+ int j;
+ int index = 0;
+
+ j = len - BYTES_PER_WORD;
+ while (j >= 0) {
+ for (i = 0; i < BYTES_PER_WORD; i++) {
+ index = len - j - BYTES_PER_WORD + i;
+ out[j + i] =
+ swap_bits_in_byte(in[index]);
+ }
+ j -= BYTES_PER_WORD;
+ }
+}
+
+static void add_session_id(struct cryp_ctx *ctx)
+{
+ /*
+ * We never want 0 to be a valid value, since this is the default value
+ * for the software context.
+ */
+ if (unlikely(atomic_inc_and_test(&session_id)))
+ atomic_inc(&session_id);
+
+ ctx->session_id = atomic_read(&session_id);
+}
+
+static irqreturn_t cryp_interrupt_handler(int irq, void *param)
+{
+ struct cryp_ctx *ctx;
+ int i;
+ struct cryp_device_data *device_data;
+
+ if (param == NULL) {
+ BUG_ON(!param);
+ return IRQ_HANDLED;
+ }
+
+ /* The device comes from the one found in hw_crypt_noxts. */
+ device_data = (struct cryp_device_data *)param;
+
+ ctx = device_data->current_ctx;
+
+ if (ctx == NULL) {
+ BUG_ON(!ctx);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen,
+ cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ?
+ "out" : "in");
+
+ if (cryp_pending_irq_src(device_data,
+ CRYP_IRQ_SRC_OUTPUT_FIFO)) {
+ if (ctx->outlen / ctx->blocksize > 0) {
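+ /* Drain one block, word by word, from the output FIFO. */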
+ for (i = 0; i < ctx->blocksize / 4; i++) {
+ cryp_read_outdata(device_data,
+ (u32 *)ctx->outdata);
+ ctx->outdata += 4;
+ ctx->outlen -= 4;
+ }
+
+ if (ctx->outlen == 0) {
+ cryp_disable_irq_src(device_data,
+ CRYP_IRQ_SRC_OUTPUT_FIFO);
+ }
+ }
+ } else if (cryp_pending_irq_src(device_data,
+ CRYP_IRQ_SRC_INPUT_FIFO)) {
+ if (ctx->datalen / ctx->blocksize > 0) {
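+ /* Feed one block, word by word, into the input FIFO. */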
+ for (i = 0 ; i < ctx->blocksize / 4; i++) {
+ cryp_write_indata(device_data,
+ *((u32 *)ctx->indata));
+ ctx->indata += 4;
+ ctx->datalen -= 4;
+ }
+
+ if (ctx->datalen == 0)
+ cryp_disable_irq_src(device_data,
+ CRYP_IRQ_SRC_INPUT_FIFO);
+
+ if (ctx->config.algomode == CRYP_ALGO_AES_XTS) {
+ CRYP_PUT_BITS(&device_data->base->cr,
+ CRYP_START_ENABLE,
+ CRYP_CR_START_POS,
+ CRYP_CR_START_MASK);
+
+ cryp_wait_until_done(device_data);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mode_is_aes(enum cryp_algo_mode mode)
+{
+ return (CRYP_ALGO_AES_ECB == mode) ||
+ (CRYP_ALGO_AES_CBC == mode) ||
+ (CRYP_ALGO_AES_CTR == mode) ||
+ (CRYP_ALGO_AES_XTS == mode);
+}
+
+static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right,
+ enum cryp_init_vector_index index)
+{
+ struct cryp_init_vector_value vector_value;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ vector_value.init_value_left = left;
+ vector_value.init_value_right = right;
+
+ return cryp_configure_init_vector(device_data,
+ index,
+ vector_value);
+}
+
+static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
+{
+ int i;
+ int status = 0;
+ int num_of_regs = ctx->blocksize / 8;
+ u32 iv[AES_BLOCK_SIZE / 4];
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ /*
+ * Since we loop over num_of_regs we need a check in case someone
+ * provides an incorrect blocksize, which would force calling cfg_iv
+ * with an index greater than 1, which is an error.
+ */
+ if (num_of_regs > 2) {
+ dev_err(device_data->dev, "[%s] Incorrect blocksize %d",
+ __func__, ctx->blocksize);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ctx->blocksize / 4; i++)
+ iv[i] = uint8p_to_uint32_be(ctx->iv + i*4);
+
+ for (i = 0; i < num_of_regs; i++) {
+ status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
+ (enum cryp_init_vector_index) i);
+ if (status != 0)
+ return status;
+ }
+ return status;
+}
+
+static int set_key(struct cryp_device_data *device_data,
+ u32 left_key,
+ u32 right_key,
+ enum cryp_key_reg_index index)
+{
+ struct cryp_key_value key_value;
+ int cryp_error;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ key_value.key_value_left = left_key;
+ key_value.key_value_right = right_key;
+
+ cryp_error = cryp_configure_key_values(device_data,
+ index,
+ key_value);
+ if (cryp_error != 0)
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_configure_key_values() failed!", __func__);
+
+ return cryp_error;
+}
+
+static int cfg_keys(struct cryp_ctx *ctx)
+{
+ int i;
+ int num_of_regs = ctx->keylen / 8;
+ u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
+ int cryp_error = 0;
+
+ dev_dbg(ctx->device->dev, "[%s]", __func__);
+
+ if (mode_is_aes(ctx->config.algomode)) {
+ swap_words_in_key_and_bits_in_byte((u8 *)ctx->key,
+ (u8 *)swapped_key,
+ ctx->keylen);
+ } else {
+ for (i = 0; i < ctx->keylen / 4; i++)
+ swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4);
+ }
+
+ for (i = 0; i < num_of_regs; i++) {
+ cryp_error = set_key(ctx->device,
+ *(((u32 *)swapped_key)+i*2),
+ *(((u32 *)swapped_key)+i*2+1),
+ (enum cryp_key_reg_index) i);
+
+ if (cryp_error != 0) {
+ dev_err(ctx->device->dev, "[%s]: set_key() failed!",
+ __func__);
+ return cryp_error;
+ }
+ }
+ return cryp_error;
+}
+
+static int cryp_setup_context(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ u32 control_register = CRYP_CR_DEFAULT;
+
+ switch (cryp_mode) {
+ case CRYP_MODE_INTERRUPT:
+ writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc);
+ break;
+
+ case CRYP_MODE_DMA:
+ writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr);
+ break;
+
+ default:
+ break;
+ }
+
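+ /*
+ * A new transform (updated == 0) is programmed from scratch: flush the
+ * FIFOs and load keys and IVs. A context that has lost the hardware to
+ * another session since its last run is restored from its saved
+ * register copy. Otherwise the saved control word is reused as-is.
+ */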
+ if (ctx->updated == 0) {
+ cryp_flush_inoutfifo(device_data);
+ if (cfg_keys(ctx) != 0) {
+ dev_err(ctx->device->dev, "[%s]: cfg_keys failed!",
+ __func__);
+ return -EPERM;
+ }
+
+ if ((ctx->iv) &&
+ (CRYP_ALGO_AES_ECB != ctx->config.algomode) &&
+ (CRYP_ALGO_DES_ECB != ctx->config.algomode) &&
+ (CRYP_ALGO_TDES_ECB != ctx->config.algomode)) {
+ if (cfg_ivs(device_data, ctx) != 0)
+ return -EPERM;
+ }
+
+ cryp_set_configuration(device_data, &ctx->config,
+ &control_register);
+ add_session_id(ctx);
+ } else if (ctx->updated == 1 &&
+ ctx->session_id != atomic_read(&session_id)) {
+ cryp_flush_inoutfifo(device_data);
+ cryp_restore_device_context(device_data, &ctx->dev_ctx);
+
+ add_session_id(ctx);
+ control_register = ctx->dev_ctx.cr;
+ } else
+ control_register = ctx->dev_ctx.cr;
+
+ writel(control_register |
+ (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
+ &device_data->base->cr);
+
+ return 0;
+}
+
+static int cryp_get_device_data(struct cryp_ctx *ctx,
+ struct cryp_device_data **device_data)
+{
+ int ret;
+ struct klist_iter device_iterator;
+ struct klist_node *device_node;
+ struct cryp_device_data *local_device_data = NULL;
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ /* Wait until a device is available */
+ ret = down_interruptible(&driver_data.device_allocation);
+ if (ret)
+ return ret; /* Interrupted */
+
+ /* Select a device */
+ klist_iter_init(&driver_data.device_list, &device_iterator);
+
+ device_node = klist_next(&device_iterator);
+ while (device_node) {
+ local_device_data = container_of(device_node,
+ struct cryp_device_data, list_node);
+ spin_lock(&local_device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (local_device_data->current_ctx) {
+ device_node = klist_next(&device_iterator);
+ } else {
+ local_device_data->current_ctx = ctx;
+ ctx->device = local_device_data;
+ spin_unlock(&local_device_data->ctx_lock);
+ break;
+ }
+ spin_unlock(&local_device_data->ctx_lock);
+ }
+ klist_iter_exit(&device_iterator);
+
+ if (!device_node) {
+ /*
+ * No free device found.
+ * Since we reserved a device with down_interruptible, this
+ * should not be able to happen.
+ * The number of available devices, tracked by device_allocation,
+ * is therefore kept decremented by not calling up() here.
+ */
+ return -EBUSY;
+ }
+
+ *device_data = local_device_data;
+
+ return 0;
+}
+
+static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
+ struct device *dev)
+{
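+ /*
+ * Request one DMA channel per direction from the DMA40 controller,
+ * using the preconfigured stedma40 channel descriptors.
+ */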
+ dma_cap_zero(device_data->dma.mask);
+ dma_cap_set(DMA_SLAVE, device_data->dma.mask);
+
+ device_data->dma.cfg_mem2cryp = mem_to_engine;
+ device_data->dma.chan_mem2cryp =
+ dma_request_channel(device_data->dma.mask,
+ stedma40_filter,
+ device_data->dma.cfg_mem2cryp);
+
+ device_data->dma.cfg_cryp2mem = engine_to_mem;
+ device_data->dma.chan_cryp2mem =
+ dma_request_channel(device_data->dma.mask,
+ stedma40_filter,
+ device_data->dma.cfg_cryp2mem);
+
+ init_completion(&device_data->dma.cryp_dma_complete);
+}
+
+static void cryp_dma_out_callback(void *data)
+{
+ struct cryp_ctx *ctx = (struct cryp_ctx *) data;
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ complete(&ctx->device->dma.cryp_dma_complete);
+}
+
+static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
+ struct scatterlist *sg,
+ int len,
+ enum dma_data_direction direction)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = NULL;
+ dma_cookie_t cookie;
+
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ if (unlikely(!IS_ALIGNED((u32)sg, 4))) {
+ dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
+ "aligned! Addr: 0x%08x", __func__, (u32)sg);
+ return -EFAULT;
+ }
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ channel = ctx->device->dma.chan_mem2cryp;
+ ctx->device->dma.sg_src = sg;
+ ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg_src,
+ ctx->device->dma.nents_src,
+ direction);
+
+ if (!ctx->device->dma.sg_src_len) {
+ dev_dbg(ctx->device->dev,
+ "[%s]: Could not map the sg list (TO_DEVICE)",
+ __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(TO_DEVICE)", __func__);
+
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg_src,
+ ctx->device->dma.sg_src_len,
+ direction,
+ DMA_CTRL_ACK);
+ break;
+
+ case DMA_FROM_DEVICE:
+ channel = ctx->device->dma.chan_cryp2mem;
+ ctx->device->dma.sg_dst = sg;
+ ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg_dst,
+ ctx->device->dma.nents_dst,
+ direction);
+
+ if (!ctx->device->dma.sg_dst_len) {
+ dev_dbg(ctx->device->dev,
+ "[%s]: Could not map the sg list "
+ "(FROM_DEVICE)", __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(FROM_DEVICE)", __func__);
+
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg_dst,
+ ctx->device->dma.sg_dst_len,
+ direction,
+ DMA_CTRL_ACK |
+ DMA_PREP_INTERRUPT);
+
+ desc->callback = cryp_dma_out_callback;
+ desc->callback_param = ctx;
+ break;
+
+ default:
+ dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction",
+ __func__);
+ return -EFAULT;
+ }
+
+ cookie = desc->tx_submit(desc);
+ dma_async_issue_pending(channel);
+
+ return 0;
+}
+
+static void cryp_dma_done(struct cryp_ctx *ctx)
+{
+ struct dma_chan *chan;
+
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ chan = ctx->device->dma.chan_mem2cryp;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
+ ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
+
+ chan = ctx->device->dma.chan_cryp2mem;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
+ ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
+}
+
+static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
+ int len)
+{
+ int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
+static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len)
+{
+ int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE);
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
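+/*
+ * cryp_polling_mode - process the data block by block: write one block to
+ * the input FIFO, busy-wait until the logic is done, then read the result
+ * back from the output FIFO.
+ */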
+static void cryp_polling_mode(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ int len = ctx->blocksize / BYTES_PER_WORD;
+ int remaining_length = ctx->datalen;
+ u32 *indata = (u32 *)ctx->indata;
+ u32 *outdata = (u32 *)ctx->outdata;
+
+ while (remaining_length > 0) {
+ writesl(&device_data->base->din, indata, len);
+ indata += len;
+ remaining_length -= (len * BYTES_PER_WORD);
+ cryp_wait_until_done(device_data);
+
+ readsl(&device_data->base->dout, outdata, len);
+ outdata += len;
+ cryp_wait_until_done(device_data);
+ }
+}
+
+static int cryp_disable_power(struct device *dev,
+ struct cryp_device_data *device_data,
+ bool save_device_context)
+{
+ int ret = 0;
+
+ dev_dbg(dev, "[%s]", __func__);
+
+ spin_lock(&device_data->power_state_spinlock);
+ if (!device_data->power_state)
+ goto out;
+
+ spin_lock(&device_data->ctx_lock);
+ if (save_device_context && device_data->current_ctx) {
+ cryp_save_device_context(device_data,
+ &device_data->current_ctx->dev_ctx,
+ cryp_mode);
+ device_data->restore_dev_ctx = true;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ clk_disable(device_data->clk);
+ ret = ux500_regulator_atomic_disable(device_data->pwr_regulator);
+ if (ret)
+ dev_err(dev, "[%s]: "
+ "regulator_disable() failed!",
+ __func__);
+
+ device_data->power_state = false;
+
+out:
+ spin_unlock(&device_data->power_state_spinlock);
+
+ return ret;
+}
+
+static int cryp_enable_power(
+ struct device *dev,
+ struct cryp_device_data *device_data,
+ bool restore_device_context)
+{
+ int ret = 0;
+
+ dev_dbg(dev, "[%s]", __func__);
+
+ spin_lock(&device_data->power_state_spinlock);
+ if (!device_data->power_state) {
+ ret = ux500_regulator_atomic_enable(device_data->pwr_regulator);
+ if (ret) {
+ dev_err(dev, "[%s]: regulator_enable() failed!",
+ __func__);
+ goto out;
+ }
+
+ ret = clk_enable(device_data->clk);
+ if (ret) {
+ dev_err(dev, "[%s]: clk_enable() failed!",
+ __func__);
+ ux500_regulator_atomic_disable(
+ device_data->pwr_regulator);
+ goto out;
+ }
+ device_data->power_state = true;
+ }
+
+ if (device_data->restore_dev_ctx) {
+ spin_lock(&device_data->ctx_lock);
+ if (restore_device_context && device_data->current_ctx) {
+ device_data->restore_dev_ctx = false;
+ cryp_restore_device_context(device_data,
+ &device_data->current_ctx->dev_ctx);
+ }
+ spin_unlock(&device_data->ctx_lock);
+ }
+out:
+ spin_unlock(&device_data->power_state_spinlock);
+
+ return ret;
+}
+
+static int hw_crypt_noxts(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ int ret = 0;
+
+ const u8 *indata = ctx->indata;
+ u8 *outdata = ctx->outdata;
+ u32 datalen = ctx->datalen;
+ u32 outlen = datalen;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->outlen = ctx->datalen;
+
+ if (unlikely(!IS_ALIGNED((u32)indata, 4))) {
+ pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
+ "0x%08x", __func__, (u32)indata);
+ return -EINVAL;
+ }
+
+ ret = cryp_setup_context(ctx, device_data);
+
+ if (ret)
+ goto out;
+
+ if (cryp_mode == CRYP_MODE_INTERRUPT) {
+ cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
+ CRYP_IRQ_SRC_OUTPUT_FIFO);
+
+ /*
+ * ctx->outlen is decremented in the cryp_interrupt_handler
+ * function. We had to add cpu_relax() (a barrier) to make sure
+ * that gcc didn't optimize away the reads of this variable.
+ */
+ while (ctx->outlen > 0)
+ cpu_relax();
+ } else if (cryp_mode == CRYP_MODE_POLLING ||
+ cryp_mode == CRYP_MODE_DMA) {
+ /*
+ * DMA also ends up in this branch: when cryp_mode is CRYP_MODE_DMA,
+ * the separate DMA routines only handle cipher/plaintext larger than
+ * the blocksize, while a plain CRYPTO_ALG_TYPE_CIPHER request still
+ * uses polling mode, since the overhead of setting up DMA eats up
+ * the benefit of using it for a single block.
+ */
+ cryp_polling_mode(ctx, device_data);
+ } else {
+ dev_err(ctx->device->dev, "[%s]: Invalid operation mode!",
+ __func__);
+ ret = -EPERM;
+ goto out;
+ }
+
+ cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
+ ctx->updated = 1;
+
+out:
+ ctx->indata = indata;
+ ctx->outdata = outdata;
+ ctx->datalen = datalen;
+ ctx->outlen = outlen;
+
+ return ret;
+}
+
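+/* Count the scatterlist entries needed to cover nbytes of data. */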
+static int get_nents(struct scatterlist *sg, int nbytes)
+{
+ int nents = 0;
+
+ while (nbytes > 0) {
+ nbytes -= sg->length;
+ sg = scatterwalk_sg_next(sg);
+ nents++;
+ }
+
+ return nents;
+}
+
+static int ablk_dma_crypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_device_data *device_data;
+
+ int bytes_written = 0;
+ int bytes_read = 0;
+ int ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->datalen = areq->nbytes;
+ ctx->outlen = areq->nbytes;
+
+ ret = cryp_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ ret = cryp_setup_context(ctx, device_data);
+ if (ret)
+ goto out_power;
+
+ /* We have the device now, so store the nents in the dma struct. */
+ ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen);
+ ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen);
+
+ /* Enable DMA in- and output. */
+ cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS);
+
+ bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen);
+ bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written);
+
+ wait_for_completion(&ctx->device->dma.cryp_dma_complete);
+ cryp_dma_done(ctx);
+
+ cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
+ ctx->updated = 1;
+
+out_power:
+ if (cryp_disable_power(device_data->dev, device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_disable_power() failed!", __func__);
+
+out:
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx = NULL;
+ ctx->device = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * cryp_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+
+ if (unlikely(bytes_written != bytes_read))
+ return -EPERM;
+
+ return 0;
+}
+
+static int ablk_crypt(struct ablkcipher_request *areq)
+{
+ struct ablkcipher_walk walk;
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_device_data *device_data;
+ unsigned long src_paddr;
+ unsigned long dst_paddr;
+ int ret;
+ int nbytes;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ret = cryp_get_device_data(ctx, &device_data);
+ /* device_data is not assigned on failure, so bail out directly. */
+ if (ret)
+ return ret;
+
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_enable_power() failed!", __func__);
+ goto out_power;
+ }
+
+ ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
+ ret = ablkcipher_walk_phys(areq, &walk);
+
+ if (ret) {
+ pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
+ __func__);
+ goto out_power;
+ }
+
+ while ((nbytes = walk.nbytes) > 0) {
+ ctx->iv = walk.iv;
+ src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
+ ctx->indata = phys_to_virt(src_paddr);
+
+ dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
+ ctx->outdata = phys_to_virt(dst_paddr);
+
+ ctx->datalen = nbytes - (nbytes % ctx->blocksize);
+
+ ret = hw_crypt_noxts(ctx, device_data);
+ if (ret)
+ goto out_power;
+
+ nbytes -= ctx->datalen;
+ ret = ablkcipher_walk_done(areq, &walk, nbytes);
+ if (ret)
+ goto out_power;
+ }
+ ablkcipher_walk_complete(&walk);
+
+out_power:
+ if (cryp_disable_power(device_data->dev, device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_disable_power() failed!", __func__);
+out:
+ /* Release the device */
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx = NULL;
+ ctx->device = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * cryp_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+
+ return ret;
+}
+
+static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->config.keysize = CRYP_KEY_SIZE_128;
+ break;
+
+ case AES_KEYSIZE_192:
+ ctx->config.keysize = CRYP_KEY_SIZE_192;
+ break;
+
+ case AES_KEYSIZE_256:
+ ctx->config.keysize = CRYP_KEY_SIZE_256;
+ break;
+
+ default:
+ pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+
+ return 0;
+}
+
+static int aes_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ if (unlikely(!IS_ALIGNED((u32)key, 4))) {
+ /* ctx->device is not set at setkey time, so dev_err() cannot be used. */
+ pr_err(DEV_DBG_NAME " [%s]: key isn't aligned! Addr: "
+ "0x%08x", __func__, (u32)key);
+ return -EFAULT;
+ }
+
+ /* For CTR mode */
+ if (keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256) {
+
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s] invalid keylen", __func__);
+ return -EINVAL;
+ }
+
+ if (keylen == AES_KEYSIZE_128)
+ ctx->config.keysize = CRYP_KEY_SIZE_128;
+ else if (keylen == AES_KEYSIZE_192)
+ ctx->config.keysize = CRYP_KEY_SIZE_192;
+ else if (keylen == AES_KEYSIZE_256)
+ ctx->config.keysize = CRYP_KEY_SIZE_256;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+ if (keylen != DES_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ int ret;
+ u32 tmp[DES_EXPKEY_WORDS];
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ if (keylen != DES_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+ const u32 *K = (const u32 *)key;
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ int i, ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+ if (keylen != DES3_EDE_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Checking key interdependency for weak key detection. */
+ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+ !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: "
+ "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
+ return -EINVAL;
+ }
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ const u32 *K = (const u32 *)key;
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ int i, ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ if (keylen != DES3_EDE_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
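+ /* K1 == K2 or K2 == K3 degenerates 3DES EDE into single DES. */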
+ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+ !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 3; i++) {
+ ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: "
+ "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
+ return -EINVAL;
+ }
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int cryp_hw_calculate(struct cryp_ctx *ctx)
+{
+ struct cryp_device_data *device_data;
+ int ret;
+
+ ret = cryp_get_device_data(ctx, &device_data);
+ /* device_data is not assigned on failure, so bail out directly. */
+ if (ret)
+ return ret;
+
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ if (hw_crypt_noxts(ctx, device_data))
+ dev_err(device_data->dev, "[%s]: hw_crypt_noxts() failed!",
+ __func__);
+
+out:
+ if (cryp_disable_power(device_data->dev, device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_disable_power() failed!", __func__);
+
+ /* Release the device */
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx = NULL;
+ ctx->device = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * cryp_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+
+ return ret;
+}
+
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des3_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static void des3_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+
+ ctx->indata = in;
+ ctx->outdata = out;
+ ctx->datalen = ctx->blocksize;
+
+ if (cryp_hw_calculate(ctx))
+ pr_err("ux500_cryp:crypX: [%s]: cryp_hw_calculate() failed!",
+ __func__);
+}
+
+static int aes_ecb_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_ecb_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_ECB;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_cbc_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CBC;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ /* Only DMA for ablkcipher, since givcipher not yet supported */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_cbc_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CBC;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ /* Only DMA for ablkcipher, since givcipher not yet supported */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_ctr_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CTR;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ /* Only DMA for ablkcipher, since givcipher not yet supported */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int aes_ctr_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_AES_CTR;
+ ctx->blocksize = AES_BLOCK_SIZE;
+
+ /* Only DMA for ablkcipher, since givcipher not yet supported */
+ if ((cryp_mode == CRYP_MODE_DMA) &&
+ (*flags & CRYPTO_ALG_TYPE_ABLKCIPHER))
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int des_ecb_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des_ecb_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_ECB;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des_cbc_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_CBC;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des_cbc_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_DES_CBC;
+ ctx->blocksize = DES_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_ecb_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_ecb_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_ECB;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_cbc_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_CBC;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+static int des3_cbc_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+ ctx->config.algomode = CRYP_ALGO_TDES_CBC;
+ ctx->blocksize = DES3_EDE_BLOCK_SIZE;
+
+ /*
+ * Run the non DMA version also for DMA, since DMA is currently not
+ * working for DES.
+ */
+ return ablk_crypt(areq);
+}
+
+/**
+ * struct crypto_alg aes_alg
+ */
+static struct crypto_alg aes_alg = {
+ .cra_name = "aes",
+ .cra_driver_name = "aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = aes_setkey,
+ .cia_encrypt = aes_encrypt,
+ .cia_decrypt = aes_decrypt
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des_alg
+ */
+static struct crypto_alg des_alg = {
+ .cra_name = "des",
+ .cra_driver_name = "des-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = DES_KEY_SIZE,
+ .cia_max_keysize = DES_KEY_SIZE,
+ .cia_setkey = des_setkey,
+ .cia_encrypt = des_encrypt,
+ .cia_decrypt = des_decrypt
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des3_alg
+ */
+static struct crypto_alg des3_alg = {
+ .cra_name = "des3_ede",
+ .cra_driver_name = "des3_ede-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des3_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = DES3_EDE_KEY_SIZE,
+ .cia_max_keysize = DES3_EDE_KEY_SIZE,
+ .cia_setkey = des3_setkey,
+ .cia_encrypt = des3_encrypt,
+ .cia_decrypt = des3_decrypt
+ }
+ }
+};
+
+/**
+ * struct crypto_alg aes_ecb_alg
+ */
+static struct crypto_alg aes_ecb_alg = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = aes_ecb_encrypt,
+ .decrypt = aes_ecb_decrypt,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg aes_cbc_alg
+ */
+static struct crypto_alg aes_cbc_alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = aes_cbc_encrypt,
+ .decrypt = aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg aes_ctr_alg
+ */
+static struct crypto_alg aes_ctr_alg = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_ctr_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = aes_ctr_encrypt,
+ .decrypt = aes_ctr_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des_ecb_alg
+ */
+static struct crypto_alg des_ecb_alg = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_ablkcipher_setkey,
+ .encrypt = des_ecb_encrypt,
+ .decrypt = des_ecb_decrypt,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des_cbc_alg
+ */
+static struct crypto_alg des_cbc_alg = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_ablkcipher_setkey,
+ .encrypt = des_cbc_encrypt,
+ .decrypt = des_cbc_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des3_ecb_alg
+ */
+static struct crypto_alg des3_ecb_alg = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3_ede-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des3_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ablkcipher_setkey,
+ .encrypt = des3_ecb_encrypt,
+ .decrypt = des3_ecb_decrypt,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg des3_cbc_alg
+ */
+static struct crypto_alg des3_cbc_alg = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3_ede-ux500",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(des3_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ablkcipher_setkey,
+ .encrypt = des3_cbc_encrypt,
+ .decrypt = des3_cbc_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ }
+ }
+};
+
+/**
+ * struct crypto_alg *ux500_cryp_algs[] - The algorithms registered by this driver.
+ */
+static struct crypto_alg *ux500_cryp_algs[] = {
+ &aes_alg,
+ &des_alg,
+ &des3_alg,
+ &aes_ecb_alg,
+ &aes_cbc_alg,
+ &aes_ctr_alg,
+ &des_ecb_alg,
+ &des_cbc_alg,
+ &des3_ecb_alg,
+ &des3_cbc_alg,
+};
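+
+/*
+ * Once registered, these transforms are reached through the regular
+ * kernel crypto API. A minimal caller-side sketch (illustration only,
+ * not part of this driver):
+ *
+ *	struct crypto_ablkcipher *tfm =
+ *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+ */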
+
+/**
+ * cryp_algs_register_all - Register all implemented algorithms with the crypto API.
+ */
+static int cryp_algs_register_all(void)
+{
+ int ret;
+ int i;
+ int count;
+
+ pr_debug("[%s]", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(ux500_cryp_algs); i++) {
+ ret = crypto_register_alg(ux500_cryp_algs[i]);
+ if (ret) {
+ count = i;
+ pr_err("[%s] alg registration failed",
+ ux500_cryp_algs[i]->cra_driver_name);
+ goto unreg;
+ }
+ }
+ return 0;
+unreg:
+ for (i = 0; i < count; i++)
+ crypto_unregister_alg(ux500_cryp_algs[i]);
+ return ret;
+}
+
+/**
+ * cryp_algs_unregister_all - Unregister all previously registered algorithms.
+ */
+static void cryp_algs_unregister_all(void)
+{
+ int i;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(ux500_cryp_algs); i++)
+ crypto_unregister_alg(ux500_cryp_algs[i]);
+}
+
+static int ux500_cryp_probe(struct platform_device *pdev)
+{
+ int ret;
+ int cryp_error = 0;
+ struct resource *res = NULL;
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+ struct cryp_protection_config prot = {
+ .privilege_access = CRYP_STATE_ENABLE
+ };
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "[%s]", __func__);
+ device_data = kzalloc(sizeof(struct cryp_device_data), GFP_ATOMIC);
+ if (!device_data) {
+ dev_err(dev, "[%s]: kzalloc() failed!", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ device_data->dev = dev;
+ device_data->current_ctx = NULL;
+
+ /* Grab the DMA configuration from platform data. */
+ mem_to_engine = &((struct cryp_platform_data *)
+ dev->platform_data)->mem_to_engine;
+ engine_to_mem = &((struct cryp_platform_data *)
+ dev->platform_data)->engine_to_mem;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "[%s]: platform_get_resource() failed",
+ __func__);
+ ret = -ENODEV;
+ goto out_kfree;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (res == NULL) {
+ dev_err(dev, "[%s]: request_mem_region() failed",
+ __func__);
+ ret = -EBUSY;
+ goto out_kfree;
+ }
+
+ device_data->base = ioremap(res->start, resource_size(res));
+ if (!device_data->base) {
+ dev_err(dev, "[%s]: ioremap failed!", __func__);
+ ret = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ spin_lock_init(&device_data->ctx_lock);
+ spin_lock_init(&device_data->power_state_spinlock);
+
+ /* Enable power for CRYP hardware block */
+ device_data->pwr_regulator = ux500_regulator_get(&pdev->dev);
+ if (IS_ERR(device_data->pwr_regulator)) {
+ dev_err(dev, "[%s]: could not get cryp regulator", __func__);
+ ret = PTR_ERR(device_data->pwr_regulator);
+ device_data->pwr_regulator = NULL;
+ goto out_unmap;
+ }
+
+ /* Enable the clk for CRYP hardware block */
+ device_data->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(device_data->clk)) {
+ dev_err(dev, "[%s]: clk_get() failed!", __func__);
+ ret = PTR_ERR(device_data->clk);
+ goto out_regulator;
+ }
+
+ /* Enable device power (and clock) */
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
+ goto out_clk;
+ }
+
+ cryp_error = cryp_check(device_data);
+ if (cryp_error != 0) {
+ dev_err(dev, "[%s]: cryp_init() failed!", __func__);
+ ret = -EINVAL;
+ goto out_power;
+ }
+
+ cryp_error = cryp_configure_protection(device_data, &prot);
+ if (cryp_error != 0) {
+ dev_err(dev, "[%s]: cryp_configure_protection() failed!",
+ __func__);
+ ret = -EINVAL;
+ goto out_power;
+ }
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq) {
+ dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
+ __func__);
+ ret = -ENODEV;
+ goto out_power;
+ }
+
+ ret = request_irq(res_irq->start,
+ cryp_interrupt_handler,
+ 0,
+ "cryp1",
+ device_data);
+ if (ret) {
+ dev_err(dev, "[%s]: Unable to request IRQ", __func__);
+ goto out_power;
+ }
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ cryp_dma_setup_channel(device_data, dev);
+
+ platform_set_drvdata(pdev, device_data);
+
+ /* Put the new device into the device list... */
+ klist_add_tail(&device_data->list_node, &driver_data.device_list);
+
+ /* ... and signal that a new device is available. */
+ up(&driver_data.device_allocation);
+
+ atomic_set(&session_id, 1);
+
+ ret = cryp_algs_register_all();
+ if (ret) {
+ dev_err(dev, "[%s]: cryp_algs_register_all() failed!",
+ __func__);
+ goto out_power;
+ }
+
+ if (cryp_disable_power(&pdev->dev, device_data, false))
+ dev_err(dev, "[%s]: cryp_disable_power() failed!", __func__);
+
+ return 0;
+
+out_power:
+ cryp_disable_power(&pdev->dev, device_data, false);
+
+out_clk:
+ clk_put(device_data->clk);
+
+out_regulator:
+ ux500_regulator_put(device_data->pwr_regulator);
+
+out_unmap:
+ iounmap(device_data->base);
+
+out_free_mem:
+ release_mem_region(res->start, resource_size(res));
+
+out_kfree:
+ kfree(device_data);
+out:
+ return ret;
+}
+
+static int ux500_cryp_remove(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Try to decrease the number of available devices. */
+ if (down_trylock(&driver_data.device_allocation))
+ return -EBUSY;
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (device_data->current_ctx) {
+ /* The device is busy */
+ spin_unlock(&device_data->ctx_lock);
+ /* Return the device to the pool. */
+ up(&driver_data.device_allocation);
+ return -EBUSY;
+ }
+
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ cryp_algs_unregister_all();
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
+ __func__);
+ else {
+ disable_irq(res_irq->start);
+ free_irq(res_irq->start, device_data);
+ }
+
+ if (cryp_disable_power(&pdev->dev, device_data, false))
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
+ __func__);
+
+ clk_put(device_data->clk);
+ ux500_regulator_put(device_data->pwr_regulator);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(device_data);
+
+ return 0;
+}
+
+static void ux500_cryp_shutdown(struct platform_device *pdev)
+{
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return;
+ }
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (!device_data->current_ctx) {
+ if (down_trylock(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: Cryp still in use!"
+ "Shutting down anyway...", __func__);
+ /**
+ * (Allocate the device)
+ * Need to set this to non-null (dummy) value,
+ * to avoid usage if context switching.
+ */
+ device_data->current_ctx++;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ cryp_algs_unregister_all();
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
+ __func__);
+ else {
+ disable_irq(res_irq->start);
+ free_irq(res_irq->start, device_data);
+ }
+
+ if (cryp_disable_power(&pdev->dev, device_data, false))
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
+ __func__);
+}
+
+static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret;
+ struct cryp_device_data *device_data;
+ struct resource *res_irq;
+ struct cryp_ctx *temp_ctx = NULL;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ /* Handle state? */
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
+ __func__);
+ else
+ disable_irq(res_irq->start);
+
+ spin_lock(&device_data->ctx_lock);
+ if (!device_data->current_ctx)
+ device_data->current_ctx++;
+ spin_unlock(&device_data->ctx_lock);
+
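+ /*
+ * ++temp_ctx recreates the dummy marker stored above (a NULL
+ * pointer incremented by one element). A match means that the
+ * device was free and has just been claimed by suspend itself,
+ * so no hardware state needs to be saved.
+ */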
+ if (device_data->current_ctx == ++temp_ctx) {
+ if (down_interruptible(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
+ "failed", __func__);
+ ret = cryp_disable_power(&pdev->dev, device_data, false);
+
+ } else
+ ret = cryp_disable_power(&pdev->dev, device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__);
+
+ return ret;
+}
+
+static int ux500_cryp_resume(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct cryp_device_data *device_data;
+ struct resource *res_irq;
+ struct cryp_ctx *temp_ctx = NULL;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
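+ /*
+ * Undo the dummy marking done in suspend: ++temp_ctx recreates
+ * the marker value, and a match means that no real context owns
+ * the device, which is then returned to the free pool below.
+ */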
+ spin_lock(&device_data->ctx_lock);
+ if (device_data->current_ctx == ++temp_ctx)
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+
+ if (!device_data->current_ctx)
+ up(&driver_data.device_allocation);
+ else
+ ret = cryp_enable_power(&pdev->dev, device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!",
+ __func__);
+ else {
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res_irq)
+ enable_irq(res_irq->start);
+ }
+
+ return ret;
+}
+
+static struct platform_driver cryp_driver = {
+ .probe = ux500_cryp_probe,
+ .remove = ux500_cryp_remove,
+ .shutdown = ux500_cryp_shutdown,
+ .suspend = ux500_cryp_suspend,
+ .resume = ux500_cryp_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cryp1"
+ }
+};
+
+static int __init ux500_cryp_mod_init(void)
+{
+ pr_debug("[%s] is called!", __func__);
+ klist_init(&driver_data.device_list, NULL, NULL);
+ /* Initialize the semaphore to 0 devices (locked state) */
+ sema_init(&driver_data.device_allocation, 0);
+ return platform_driver_register(&cryp_driver);
+}
+
+static void __exit ux500_cryp_mod_fini(void)
+{
+ pr_debug("[%s] is called!", __func__);
+ platform_driver_unregister(&cryp_driver);
+}
+
+module_init(ux500_cryp_mod_init);
+module_exit(ux500_cryp_mod_fini);
+
+module_param(cryp_mode, int, 0);
+
+MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
+MODULE_ALIAS("aes-all");
+MODULE_ALIAS("des-all");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c
new file mode 100644
index 00000000000..08d291cdbe6
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.c
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitmap.h>
+#include <linux/device.h>
+
+#include "cryp.h"
+#include "cryp_p.h"
+#include "cryp_irq.h"
+#include "cryp_irqp.h"
+
+void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ u32 i;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ i = readl_relaxed(&device_data->base->imsc);
+ i = i | irq_src;
+ writel_relaxed(i, &device_data->base->imsc);
+}
+
+void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ u32 i;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ i = readl_relaxed(&device_data->base->imsc);
+ i = i & ~irq_src;
+ writel_relaxed(i, &device_data->base->imsc);
+}
+
+bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ return (readl_relaxed(&device_data->base->mis) & irq_src) > 0;
+}
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h
new file mode 100644
index 00000000000..5a7837f1b8f
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_IRQ_H_
+#define _CRYP_IRQ_H_
+
+#include "cryp.h"
+
+enum cryp_irq_src_id {
+ CRYP_IRQ_SRC_INPUT_FIFO = 0x1,
+ CRYP_IRQ_SRC_OUTPUT_FIFO = 0x2,
+ CRYP_IRQ_SRC_ALL = 0x3
+};
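+
+/*
+ * The source IDs are bit flags and may be OR:ed together. A minimal
+ * usage sketch (illustration only, not taken from this driver):
+ *
+ *	cryp_disable_irq_src(device_data, CRYP_IRQ_SRC_ALL);
+ *	...critical region...
+ *	cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
+ *			    CRYP_IRQ_SRC_OUTPUT_FIFO);
+ */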
+
+/**
+ * M0 Functions
+ */
+void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+#endif /* _CRYP_IRQ_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h
new file mode 100644
index 00000000000..8b339cc34bf
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irqp.h
@@ -0,0 +1,125 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __CRYP_IRQP_H_
+#define __CRYP_IRQP_H_
+
+#include "cryp_irq.h"
+
+/**
+ *
+ * CRYP Registers - Offset mapping
+ * +-----------------+
+ * 00h | CRYP_CR | Configuration register
+ * +-----------------+
+ * 04h | CRYP_SR | Status register
+ * +-----------------+
+ * 08h | CRYP_DIN | Data In register
+ * +-----------------+
+ * 0ch | CRYP_DOUT | Data out register
+ * +-----------------+
+ * 10h | CRYP_DMACR | DMA control register
+ * +-----------------+
+ * 14h | CRYP_IMSC | IMSC
+ * +-----------------+
+ * 18h | CRYP_RIS | Raw interrupt status
+ * +-----------------+
+ * 1ch | CRYP_MIS | Masked interrupt status.
+ * +-----------------+
+ * Key registers
+ * IVR registers
+ * Peripheral
+ * Cell IDs
+ *
+ * Refer to the data structure below for the rest of the register map.
+ */
+
+/**
+ * struct cryp_register
+ * @cr - Configuration register
+ * @status - Status register
+ * @din - Data input register
+ * @din_size - Data input size register
+ * @dout - Data output register
+ * @dout_size - Data output size register
+ * @dmacr - Dma control register
+ * @imsc - Interrupt mask set/clear register
+ * @ris - Raw interrupt status
+ * @mis - Masked interrupt status register
+ * @key_1_l - Key register 1 L
+ * @key_1_r - Key register 1 R
+ * @key_2_l - Key register 2 L
+ * @key_2_r - Key register 2 R
+ * @key_3_l - Key register 3 L
+ * @key_3_r - Key register 3 R
+ * @key_4_l - Key register 4 L
+ * @key_4_r - Key register 4 R
+ * @init_vect_0_l - init vector 0 L
+ * @init_vect_0_r - init vector 0 R
+ * @init_vect_1_l - init vector 1 L
+ * @init_vect_1_r - init vector 1 R
+ * @cryp_unused1 - unused registers
+ * @itcr - Integration test control register
+ * @itip - Integration test input register
+ * @itop - Integration test output register
+ * @cryp_unused2 - unused registers
+ * @periphId0 - FE0 CRYP Peripheral Identification Register
+ * @periphId1 - FE4
+ * @periphId2 - FE8
+ * @periphId3 - FEC
+ * @pcellId0 - FF0 CRYP PCell Identification Register
+ * @pcellId1 - FF4
+ * @pcellId2 - FF8
+ * @pcellId3 - FFC
+ */
+struct cryp_register {
+ u32 cr; /* Configuration register */
+ u32 sr; /* Status register */
+ u32 din; /* Data input register */
+ u32 din_size; /* Data input size register */
+ u32 dout; /* Data output register */
+ u32 dout_size; /* Data output size register */
+ u32 dmacr; /* Dma control register */
+ u32 imsc; /* Interrupt mask set/clear register */
+ u32 ris; /* Raw interrupt status */
+ u32 mis; /* Masked interrupt status register */
+
+ u32 key_1_l; /*Key register 1 L */
+ u32 key_1_r; /*Key register 1 R */
+ u32 key_2_l; /*Key register 2 L */
+ u32 key_2_r; /*Key register 2 R */
+ u32 key_3_l; /*Key register 3 L */
+ u32 key_3_r; /*Key register 3 R */
+ u32 key_4_l; /*Key register 4 L */
+ u32 key_4_r; /*Key register 4 R */
+
+ u32 init_vect_0_l; /*init vector 0 L */
+ u32 init_vect_0_r; /*init vector 0 R */
+ u32 init_vect_1_l; /*init vector 1 L */
+ u32 init_vect_1_r; /*init vector 1 R */
+
+ u32 cryp_unused1[(0x80 - 0x58) / sizeof(u32)]; /* unused registers */
+ u32 itcr; /*Integration test control register */
+ u32 itip; /*Integration test input register */
+ u32 itop; /*Integration test output register */
+ u32 cryp_unused2[(0xFE0 - 0x8C) / sizeof(u32)]; /* unused registers */
+
+ u32 periphId0; /* FE0 CRYP Peripheral Identification Register */
+ u32 periphId1; /* FE4 */
+ u32 periphId2; /* FE8 */
+ u32 periphId3; /* FEC */
+
+ u32 pcellId0; /* FF0 CRYP PCell Identification Register */
+ u32 pcellId1; /* FF4 */
+ u32 pcellId2; /* FF8 */
+ u32 pcellId3; /* FFC */
+};
+
+#endif
diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h
new file mode 100644
index 00000000000..0e070829edc
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_p.h
@@ -0,0 +1,124 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_P_H_
+#define _CRYP_P_H_
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#include "cryp.h"
+#include "cryp_irqp.h"
+
+/**
+ * Generic Macros
+ */
+#define CRYP_SET_BITS(reg_name, mask) \
+ writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
+
+#define CRYP_WRITE_BIT(reg_name, val, mask) \
+ writel_relaxed(((readl_relaxed(reg_name) & ~(mask)) |\
+ ((val) & (mask))), reg_name)
+
+#define CRYP_TEST_BITS(reg_name, val) \
+ (readl_relaxed(reg_name) & (val))
+
+#define CRYP_PUT_BITS(reg, val, shift, mask) \
+ writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \
+ (((u32)val << shift) & (mask))), reg)
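+
+/*
+ * Example (sketch only): programming the three-bit algorithm mode field
+ * of the configuration register with CRYP_PUT_BITS:
+ *
+ *	CRYP_PUT_BITS(&device_data->base->cr, CRYP_ALGO_AES_CBC,
+ *		      CRYP_CR_ALGOMODE_POS, CRYP_CR_ALGOMODE_MASK);
+ */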
+
+/**
+ * CRYP specific Macros
+ */
+#define CRYP_PERIPHERAL_ID0 0xE3
+#define CRYP_PERIPHERAL_ID1 0x05
+
+#define CRYP_PERIPHERAL_ID2_DB8500 0x28
+#define CRYP_PERIPHERAL_ID2_DB5500 0x29
+#define CRYP_PERIPHERAL_ID3 0x00
+
+#define CRYP_PCELL_ID0 0x0D
+#define CRYP_PCELL_ID1 0xF0
+#define CRYP_PCELL_ID2 0x05
+#define CRYP_PCELL_ID3 0xB1
+
+/**
+ * CRYP register default values
+ */
+#define MAX_DEVICE_SUPPORT 2
+
+/* Priv set, keyrden set and datatype 8bits swapped set as default. */
+#define CRYP_CR_DEFAULT 0x0482
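+/*
+ * 0x0482 == CRYP_CR_PRLG_MASK | (0x2 << CRYP_CR_DATATYPE_POS) |
+ *           CRYP_CR_KEYRDEN_MASK
+ */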
+#define CRYP_DMACR_DEFAULT 0x0
+#define CRYP_IMSC_DEFAULT 0x0
+#define CRYP_DIN_DEFAULT 0x0
+#define CRYP_DOUT_DEFAULT 0x0
+#define CRYP_KEY_DEFAULT 0x0
+#define CRYP_INIT_VECT_DEFAULT 0x0
+
+/**
+ * CRYP Control register specific mask
+ */
+#define CRYP_CR_SECURE_MASK BIT(0)
+#define CRYP_CR_PRLG_MASK BIT(1)
+#define CRYP_CR_ALGODIR_MASK BIT(2)
+#define CRYP_CR_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3))
+#define CRYP_CR_DATATYPE_MASK (BIT(7) | BIT(6))
+#define CRYP_CR_KEYSIZE_MASK (BIT(9) | BIT(8))
+#define CRYP_CR_KEYRDEN_MASK BIT(10)
+#define CRYP_CR_KSE_MASK BIT(11)
+#define CRYP_CR_START_MASK BIT(12)
+#define CRYP_CR_INIT_MASK BIT(13)
+#define CRYP_CR_FFLUSH_MASK BIT(14)
+#define CRYP_CR_CRYPEN_MASK BIT(15)
+#define CRYP_CR_CONTEXT_SAVE_MASK (CRYP_CR_SECURE_MASK |\
+ CRYP_CR_PRLG_MASK |\
+ CRYP_CR_ALGODIR_MASK |\
+ CRYP_CR_ALGOMODE_MASK |\
+ CRYP_CR_DATATYPE_MASK |\
+ CRYP_CR_KEYSIZE_MASK |\
+ CRYP_CR_KEYRDEN_MASK)
+
+
+#define CRYP_SR_INFIFO_READY_MASK (BIT(0) | BIT(1))
+#define CRYP_SR_IFEM_MASK BIT(0)
+#define CRYP_SR_BUSY_MASK BIT(4)
+
+/**
+ * Bit position used while setting bits in register
+ */
+#define CRYP_CR_PRLG_POS 1
+#define CRYP_CR_ALGODIR_POS 2
+#define CRYP_CR_ALGOMODE_POS 3
+#define CRYP_CR_DATATYPE_POS 6
+#define CRYP_CR_KEYSIZE_POS 8
+#define CRYP_CR_KEYRDEN_POS 10
+#define CRYP_CR_KSE_POS 11
+#define CRYP_CR_START_POS 12
+#define CRYP_CR_INIT_POS 13
+#define CRYP_CR_CRYPEN_POS 15
+
+#define CRYP_SR_BUSY_POS 4
+
+/**
+ * CRYP DMA request control register bit masks
+ */
+#define CRYP_DMA_REQ_MASK (BIT(1) | BIT(0))
+#define CRYP_DMA_REQ_MASK_POS 0
+
+
+struct cryp_system_context {
+ /* CRYP Register structure */
+ struct cryp_register *p_cryp_reg[MAX_DEVICE_SUPPORT];
+};
+
+#endif
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
new file mode 100644
index 00000000000..b2f90d9bac7
--- /dev/null
+++ b/drivers/crypto/ux500/hash/Makefile
@@ -0,0 +1,11 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
+CFLAGS_hash_core.o := -DDEBUG -O0
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o
+ux500_hash-objs := hash_core.o
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
new file mode 100644
index 00000000000..b8619ea4a27
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_alg.h
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen (shujuan.chen@stericsson.com)
+ * Author: Joakim Bech (joakim.xx.bech@stericsson.com)
+ * Author: Berne Hebark (berne.hebark@stericsson.com))
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _HASH_ALG_H
+#define _HASH_ALG_H
+
+#include <linux/bitops.h>
+
+#define HASH_BLOCK_SIZE 64
+#define HASH_DMA_ALIGN_SIZE 4
+#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024
+#define HASH_BYTES_PER_WORD 4
+
+/* Maximum value of the length's high word */
+#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL
+
+/* Power on Reset values HASH registers */
+#define HASH_RESET_CR_VALUE 0x0
+#define HASH_RESET_STR_VALUE 0x0
+
+/* Number of context swap registers */
+#define HASH_CSR_COUNT 52
+
+#define HASH_RESET_CSRX_REG_VALUE 0x0
+#define HASH_RESET_CSFULL_REG_VALUE 0x0
+#define HASH_RESET_CSDATAIN_REG_VALUE 0x0
+
+#define HASH_RESET_INDEX_VAL 0x0
+#define HASH_RESET_BIT_INDEX_VAL 0x0
+#define HASH_RESET_BUFFER_VAL 0x0
+#define HASH_RESET_LEN_HIGH_VAL 0x0
+#define HASH_RESET_LEN_LOW_VAL 0x0
+
+/* Control register bitfields */
+#define HASH_CR_RESUME_MASK 0x11FCF
+
+#define HASH_CR_SWITCHON_POS 31
+#define HASH_CR_SWITCHON_MASK BIT(31)
+
+#define HASH_CR_EMPTYMSG_POS 20
+#define HASH_CR_EMPTYMSG_MASK BIT(20)
+
+#define HASH_CR_DINF_POS 12
+#define HASH_CR_DINF_MASK BIT(12)
+
+#define HASH_CR_NBW_POS 8
+#define HASH_CR_NBW_MASK 0x00000F00UL
+
+#define HASH_CR_LKEY_POS 16
+#define HASH_CR_LKEY_MASK BIT(16)
+
+#define HASH_CR_ALGO_POS 7
+#define HASH_CR_ALGO_MASK BIT(7)
+
+#define HASH_CR_MODE_POS 6
+#define HASH_CR_MODE_MASK BIT(6)
+
+#define HASH_CR_DATAFORM_POS 4
+#define HASH_CR_DATAFORM_MASK (BIT(4) | BIT(5))
+
+#define HASH_CR_DMAE_POS 3
+#define HASH_CR_DMAE_MASK BIT(3)
+
+#define HASH_CR_INIT_POS 2
+#define HASH_CR_INIT_MASK BIT(2)
+
+#define HASH_CR_PRIVN_POS 1
+#define HASH_CR_PRIVN_MASK BIT(1)
+
+#define HASH_CR_SECN_POS 0
+#define HASH_CR_SECN_MASK BIT(0)
+
+/* Start register bitfields */
+#define HASH_STR_DCAL_POS 8
+#define HASH_STR_DCAL_MASK BIT(8)
+#define HASH_STR_DEFAULT 0x0
+
+#define HASH_STR_NBLW_POS 0
+#define HASH_STR_NBLW_MASK 0x0000001FUL
+
+#define HASH_NBLW_MAX_VAL 0x1F
+
+/* PrimeCell IDs */
+#define HASH_P_ID0 0xE0
+#define HASH_P_ID1 0x05
+#define HASH_P_ID2 0x38
+#define HASH_P_ID3 0x00
+#define HASH_CELL_ID0 0x0D
+#define HASH_CELL_ID1 0xF0
+#define HASH_CELL_ID2 0x05
+#define HASH_CELL_ID3 0xB1
+
+#define HASH_SET_BITS(reg_name, mask) \
+ writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
+
+#define HASH_CLEAR_BITS(reg_name, mask) \
+ writel_relaxed((readl_relaxed(reg_name) & ~mask), reg_name)
+
+#define HASH_PUT_BITS(reg, val, shift, mask) \
+ writel_relaxed(((readl(reg) & ~(mask)) | \
+ (((u32)val << shift) & (mask))), reg)
+
+#define HASH_SET_DIN(val, len) writesl(&device_data->base->din, (val), (len))
+
+#define HASH_INITIALIZE \
+ HASH_PUT_BITS( \
+ &device_data->base->cr, \
+ 0x01, HASH_CR_INIT_POS, \
+ HASH_CR_INIT_MASK)
+
+#define HASH_SET_DATA_FORMAT(data_format) \
+ HASH_PUT_BITS( \
+ &device_data->base->cr, \
+ (u32) (data_format), HASH_CR_DATAFORM_POS, \
+ HASH_CR_DATAFORM_MASK)
+#define HASH_SET_NBLW(val) \
+ HASH_PUT_BITS( \
+ &device_data->base->str, \
+ (u32) (val), HASH_STR_NBLW_POS, \
+ HASH_STR_NBLW_MASK)
+#define HASH_SET_DCAL \
+ HASH_PUT_BITS( \
+ &device_data->base->str, \
+ 0x01, HASH_STR_DCAL_POS, \
+ HASH_STR_DCAL_MASK)
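+
+/*
+ * Sketch of the termination sequence these macros support (see
+ * hash_messagepad() in hash_core.c): write the number of valid bits of
+ * the last word to NBLW, feed the final data, then set DCAL:
+ *
+ *	HASH_SET_NBLW(index_bytes * 8);
+ *	HASH_SET_DIN(&message, nwords);
+ *	HASH_SET_DCAL;
+ */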
+
+/* Hardware access method */
+enum hash_mode {
+ HASH_MODE_CPU,
+ HASH_MODE_DMA
+};
+
+/**
+ * struct uint64 - Structure to handle 64 bits integers.
+ * @high_word: Most significant bits.
+ * @low_word: Least significant bits.
+ *
+ * Used to handle 64 bits integers.
+ */
+struct uint64 {
+ u32 high_word;
+ u32 low_word;
+};
+
+/**
+ * struct hash_register - Contains all registers in u8500 hash hardware.
+ * @cr: HASH control register (0x000).
+ * @din: HASH data input register (0x004).
+ * @str: HASH start register (0x008).
+ * @hx: HASH digest register 0..7 (0x00c-0x01C).
+ * @padding0: Reserved (0x02C).
+ * @itcr: Integration test control register (0x080).
+ * @itip: Integration test input register (0x084).
+ * @itop: Integration test output register (0x088).
+ * @padding1: Reserved (0x08C).
+ * @csfull: HASH context full register (0x0F8).
+ * @csdatain: HASH context swap data input register (0x0FC).
+ * @csrx: HASH context swap register 0..51 (0x100-0x1CC).
+ * @padding2: Reserved (0x1D0).
+ * @periphid0: HASH peripheral identification register 0 (0xFE0).
+ * @periphid1: HASH peripheral identification register 1 (0xFE4).
+ * @periphid2: HASH peripheral identification register 2 (0xFE8).
+ * @periphid3: HASH peripheral identification register 3 (0xFEC).
+ * @cellid0: HASH PCell identification register 0 (0xFF0).
+ * @cellid1: HASH PCell identification register 1 (0xFF4).
+ * @cellid2: HASH PCell identification register 2 (0xFF8).
+ * @cellid3: HASH PCell identification register 3 (0xFFC).
+ *
+ * The device communicates to the HASH via 32-bit-wide control registers
+ * accessible via the 32-bit width AMBA rev. 2.0 AHB Bus. Below is a structure
+ * with the registers used.
+ */
+struct hash_register {
+ u32 cr;
+ u32 din;
+ u32 str;
+ u32 hx[8];
+
+ u32 padding0[(0x080 - 0x02C) / sizeof(u32)];
+
+ u32 itcr;
+ u32 itip;
+ u32 itop;
+
+ u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)];
+
+ u32 csfull;
+ u32 csdatain;
+ u32 csrx[HASH_CSR_COUNT];
+
+ u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)];
+
+ u32 periphid0;
+ u32 periphid1;
+ u32 periphid2;
+ u32 periphid3;
+
+ u32 cellid0;
+ u32 cellid1;
+ u32 cellid2;
+ u32 cellid3;
+};
+
+/**
+ * struct hash_state - Hash context state.
+ * @temp_cr: Temporary HASH Control Register.
+ * @str_reg: HASH Start Register.
+ * @din_reg: HASH Data Input Register.
+ * @csr[52]: HASH Context Swap Registers 0-51.
+ * @csfull: HASH Context Swap Register 52, i.e. status flags.
+ * @csdatain: HASH Context Swap Register 53, i.e. input data.
+ * @buffer: Working buffer for messages going to the hardware.
+ * @length: Length of the part of message hashed so far (floor(N/64) * 64).
+ * @index: Valid number of bytes in buffer (N % 64).
+ * @bit_index: Valid number of bits in buffer (N % 8).
+ *
+ * This structure is used between context switches, i.e. when ongoing jobs are
+ * interrupted with new jobs. When this happens we need to store intermediate
+ * results in software.
+ *
+ * WARNING: "index" is the member of the structure, to be sure that "buffer"
+ * is aligned on a 4-bytes boundary. This is highly implementation dependent
+ * and MUST be checked whenever this code is ported on new platforms.
+ */
+struct hash_state {
+ u32 temp_cr;
+ u32 str_reg;
+ u32 din_reg;
+ u32 csr[52];
+ u32 csfull;
+ u32 csdatain;
+ u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)];
+ struct uint64 length;
+ u8 index;
+ u8 bit_index;
+};
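+
+/*
+ * The alignment assumption in the warning above could be verified at
+ * build time, e.g. (suggestion only, not part of this driver):
+ *
+ *	BUILD_BUG_ON(offsetof(struct hash_state, buffer) % sizeof(u32));
+ */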
+
+/**
+ * enum hash_device_id - HASH device ID.
+ * @HASH_DEVICE_ID_0: Hash hardware with ID 0
+ * @HASH_DEVICE_ID_1: Hash hardware with ID 1
+ */
+enum hash_device_id {
+ HASH_DEVICE_ID_0 = 0,
+ HASH_DEVICE_ID_1 = 1
+};
+
+/**
+ * enum hash_data_format - HASH data format.
+ * @HASH_DATA_32_BITS: 32 bits data format
+ * @HASH_DATA_16_BITS: 16 bits data format
+ * @HASH_DATA_8_BITS: 8 bits data format.
+ * @HASH_DATA_1_BIT: 1 bit data format.
+ */
+enum hash_data_format {
+ HASH_DATA_32_BITS = 0x0,
+ HASH_DATA_16_BITS = 0x1,
+ HASH_DATA_8_BITS = 0x2,
+ HASH_DATA_1_BIT = 0x3
+};
+
+/**
+ * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm.
+ * @HASH_ALGO_SHA1: Indicates that SHA1 is used.
+ * @HASH_ALGO_SHA256: Indicates that SHA2 (SHA256) is used.
+ */
+enum hash_algo {
+ HASH_ALGO_SHA1 = 0x0,
+ HASH_ALGO_SHA256 = 0x1
+};
+
+/**
+ * enum hash_op - Enumeration for selecting between HASH or HMAC mode.
+ * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode.
+ * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC.
+ */
+enum hash_op {
+ HASH_OPER_MODE_HASH = 0x0,
+ HASH_OPER_MODE_HMAC = 0x1
+};
+
+/**
+ * struct hash_config - Configuration data for the hardware.
+ * @data_format: Format of data entered into the hash data in register.
+ * @algorithm: Algorithm selection bit.
+ * @oper_mode: Operating mode selection bit.
+ */
+struct hash_config {
+ int data_format;
+ int algorithm;
+ int oper_mode;
+};
+
+/**
+ * struct hash_dma - Structure used for dma.
+ * @mask: DMA capabilities bitmap mask.
+ * @complete: Used to maintain state for a "completion".
+ * @chan_mem2hash: DMA channel.
+ * @cfg_mem2hash: DMA channel configuration.
+ * @sg_len: Scatterlist length.
+ * @sg: Scatterlist.
+ * @nents: Number of sg entries.
+ */
+struct hash_dma {
+ dma_cap_mask_t mask;
+ struct completion complete;
+ struct dma_chan *chan_mem2hash;
+ void *cfg_mem2hash;
+ int sg_len;
+ struct scatterlist *sg;
+ int nents;
+};
+
+/**
+ * struct hash_ctx - The context used for hash calculations.
+ * @key: The key used in the operation.
+ * @keylen: The length of the key.
+ * @updated: Indicates if hardware is initialized for new operations.
+ * @state: The state of the current calculations.
+ * @config: The current configuration.
+ * @digestsize: The size of current digest.
+ * @device: Pointer to the device structure.
+ * @dma_mode: Used in special cases (workaround), e.g. need to change to
+ * cpu mode, if not supported/working in dma mode.
+ */
+struct hash_ctx {
+ u8 *key;
+ u32 keylen;
+ u8 updated;
+ struct hash_state state;
+ struct hash_config config;
+ int digestsize;
+ struct hash_device_data *device;
+ bool dma_mode;
+};
+
+/**
+ * struct hash_device_data - structure for a hash device.
+ * @base: Pointer to the hardware base address.
+ * @list_node: For inclusion in klist.
+ * @dev: Pointer to the device dev structure.
+ * @ctx_lock: Spinlock for current_ctx.
+ * @current_ctx: Pointer to the currently allocated context.
+ * @power_state: TRUE = power state on, FALSE = power state off.
+ * @power_state_lock: Spinlock for power_state.
+ * @regulator: Pointer to the device's power control.
+ * @clk: Pointer to the device's clock control.
+ * @restore_dev_state: TRUE = saved state, FALSE = no saved state.
+ * @dma: Structure used for dma.
+ */
+struct hash_device_data {
+ struct hash_register __iomem *base;
+ struct klist_node list_node;
+ struct device *dev;
+ struct spinlock ctx_lock;
+ struct hash_ctx *current_ctx;
+ bool power_state;
+ struct spinlock power_state_lock;
+ struct ux500_regulator *regulator;
+ struct clk *clk;
+ bool restore_dev_state;
+ struct hash_dma dma;
+};
+
+int hash_check_hw(struct hash_device_data *device_data);
+
+int hash_setconfiguration(struct hash_device_data *device_data,
+ struct hash_config *config);
+
+void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx);
+
+void hash_get_digest(struct hash_device_data *device_data,
+ u8 *digest, int algorithm);
+
+int hash_hw_update(struct ahash_request *req);
+
+int hash_save_state(struct hash_device_data *device_data,
+ struct hash_state *state);
+
+int hash_resume_state(struct hash_device_data *device_data,
+ const struct hash_state *state);
+
+#endif
diff --git a/drivers/crypto/ux500/hash/hash_alg_p.h b/drivers/crypto/ux500/hash/hash_alg_p.h
new file mode 100755
index 00000000000..c85faaeba6f
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_alg_p.h
@@ -0,0 +1,26 @@
+/*****************************************************************************/
+/**
+* (C) ST-Ericsson, 2009 - All rights reserved
+* Reproduction and Communication of this document is strictly prohibited
+* unless specifically authorized in writing by ST-Ericsson
+*
+* Private header file of the HASH processor
+* Specification release related to this implementation: A_V2.2
+* AUTHOR : ST-Ericsson
+*/
+/*****************************************************************************/
+
+#ifndef _HASH_P_H_
+#define _HASH_P_H_
+
+/*--------------------------------------------------------------------------*
+ * Includes *
+ *--------------------------------------------------------------------------*/
+#include "hash_alg.h"
+
+/*--------------------------------------------------------------------------*
+ * Defines *
+ *--------------------------------------------------------------------------*/
+
+#endif /* End _HASH_P_H_ */
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
new file mode 100644
index 00000000000..05a06b00ca0
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -0,0 +1,2080 @@
+/*
+ * Cryptographic API.
+ * Support for Nomadik hardware crypto engine.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/klist.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <linux/regulator/dbx500-prcmu.h>
+#include <linux/dmaengine.h>
+#include <linux/bitops.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+
+#include <mach/crypto-ux500.h>
+#include <mach/hardware.h>
+
+#include "hash_alg.h"
+
+#define DEV_DBG_NAME "hashX hashX:"
+
+static int hash_mode;
+module_param(hash_mode, int, 0);
+MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
+
+/**
+ * Pre-calculated empty message digests.
+ */
+static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09
+};
+
+static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+
+/* HMAC-SHA1, no key */
+static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
+ 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
+ 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
+ 0x70, 0x69, 0x0e, 0x1d
+};
+
+/* HMAC-SHA256, no key */
+static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
+ 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
+ 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
+ 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
+ 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
+};
+
+/**
+ * struct hash_driver_data - data specific to the driver.
+ *
+ * @device_list: A list of registered devices to choose from.
+ * @device_allocation: A semaphore initialized with number of devices.
+ */
+struct hash_driver_data {
+ struct klist device_list;
+ struct semaphore device_allocation;
+};
+
+static struct hash_driver_data driver_data;
+
+/* Declaration of functions */
+/**
+ * hash_messagepad - Pads a message and writes the NBLW bits.
+ * @device_data: Structure for the hash device.
+ * @message: Last word of a message
+ * @index_bytes: The number of bytes in the last message
+ *
+ * This function manages the final part of the digest calculation, when less
+ * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
+ *
+ * Reentrancy: Non Re-entrant.
+ */
+static void hash_messagepad(struct hash_device_data *device_data,
+ const u32 *message, u8 index_bytes);
+
+/**
+ * release_hash_device - Releases a previously allocated hash device.
+ * @device_data: Structure for the hash device.
+ *
+ */
+static void release_hash_device(struct hash_device_data *device_data)
+{
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx->device = NULL;
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * hash_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+}
+
+static void hash_dma_setup_channel(struct hash_device_data *device_data,
+ struct device *dev)
+{
+ struct hash_platform_data *platform_data = dev->platform_data;
+ dma_cap_zero(device_data->dma.mask);
+ dma_cap_set(DMA_SLAVE, device_data->dma.mask);
+
+ device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
+ device_data->dma.chan_mem2hash =
+ dma_request_channel(device_data->dma.mask,
+ platform_data->dma_filter,
+ device_data->dma.cfg_mem2hash);
+
+ init_completion(&device_data->dma.complete);
+}
+
+static void hash_dma_callback(void *data)
+{
+ struct hash_ctx *ctx = (struct hash_ctx *) data;
+
+ complete(&ctx->device->dma.complete);
+}
+
+static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
+ int len, enum dma_data_direction direction)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *channel = NULL;
+ dma_cookie_t cookie;
+
+ if (direction != DMA_TO_DEVICE) {
+ dev_err(ctx->device->dev, "[%s] Invalid DMA direction",
+ __func__);
+ return -EINVAL;
+ }
+
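+ /* Round the length up to the 4-byte HASH_DMA_ALIGN_SIZE boundary. */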
+ sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
+
+ channel = ctx->device->dma.chan_mem2hash;
+ ctx->device->dma.sg = sg;
+ ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg, ctx->device->dma.nents,
+ direction);
+
+ if (!ctx->device->dma.sg_len) {
+ dev_err(ctx->device->dev,
+ "[%s]: Could not map the sg list (TO_DEVICE)",
+ __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(TO_DEVICE)", __func__);
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg, ctx->device->dma.sg_len,
+ direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, ctx);
+ if (!desc) {
+ dev_err(ctx->device->dev,
+ "[%s]: device_prep_slave_sg() failed!", __func__);
+ return -EFAULT;
+ }
+
+ desc->callback = hash_dma_callback;
+ desc->callback_param = ctx;
+
+ cookie = desc->tx_submit(desc);
+ dma_async_issue_pending(channel);
+
+ return 0;
+}
+
+static void hash_dma_done(struct hash_ctx *ctx)
+{
+ struct dma_chan *chan;
+
+ chan = ctx->device->dma.chan_mem2hash;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
+ ctx->device->dma.sg_len, DMA_TO_DEVICE);
+}
+
+static int hash_dma_write(struct hash_ctx *ctx,
+ struct scatterlist *sg, int len)
+{
+ int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
+/**
+ * get_empty_message_digest - Returns a pre-calculated digest for
+ * the empty message.
+ * @device_data: Structure for the hash device.
+ * @zero_hash: Buffer to return the empty message digest.
+ * @zero_hash_size: Hash size of the empty message digest.
+ * @zero_digest: True if zero_digest returned.
+ */
+static int get_empty_message_digest(
+ struct hash_device_data *device_data,
+ u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
+{
+ int ret = 0;
+ struct hash_ctx *ctx = device_data->current_ctx;
+ *zero_digest = false;
+
+ /* The caller is responsible for ctx != NULL. */
+
+ if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
+ if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hash_sha1[0],
+ SHA1_DIGEST_SIZE);
+ *zero_hash_size = SHA1_DIGEST_SIZE;
+ *zero_digest = true;
+ } else if (HASH_ALGO_SHA256 ==
+ ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hash_sha256[0],
+ SHA256_DIGEST_SIZE);
+ *zero_hash_size = SHA256_DIGEST_SIZE;
+ *zero_digest = true;
+ } else {
+ dev_err(device_data->dev, "[%s] "
+ "Incorrect algorithm!"
+ , __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
+ if (!ctx->keylen) {
+ if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hmac_sha1[0],
+ SHA1_DIGEST_SIZE);
+ *zero_hash_size = SHA1_DIGEST_SIZE;
+ *zero_digest = true;
+ } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hmac_sha256[0],
+ SHA256_DIGEST_SIZE);
+ *zero_hash_size = SHA256_DIGEST_SIZE;
+ *zero_digest = true;
+ } else {
+ dev_err(device_data->dev, "[%s] "
+ "Incorrect algorithm!"
+ , __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ dev_dbg(device_data->dev, "[%s] Continue hash "
+ "calculation, since hmac key avalable",
+ __func__);
+ }
+ }
+out:
+
+ return ret;
+}
+
+/**
+ * hash_disable_power - Request to disable power and clock.
+ * @device_data: Structure for the hash device.
+ * @save_device_state: If true, saves the current hw state.
+ *
+ * This function request for disabling power (regulator) and clock,
+ * and could also save current hw state.
+ */
+static int hash_disable_power(
+ struct hash_device_data *device_data,
+ bool save_device_state)
+{
+ int ret = 0;
+ struct device *dev = device_data->dev;
+
+ spin_lock(&device_data->power_state_lock);
+ if (!device_data->power_state)
+ goto out;
+
+ if (save_device_state && device_data->current_ctx) {
+ hash_save_state(device_data,
+ &device_data->current_ctx->state);
+ device_data->restore_dev_state = true;
+ }
+
+ clk_disable(device_data->clk);
+ ret = ux500_regulator_atomic_disable(device_data->regulator);
+ if (ret)
+ dev_err(dev, "[%s] regulator_disable() failed!", __func__);
+
+ device_data->power_state = false;
+
+out:
+ spin_unlock(&device_data->power_state_lock);
+
+ return ret;
+}
+
+/**
+ * hash_enable_power - Request to enable power and clock.
+ * @device_data: Structure for the hash device.
+ * @restore_device_state: If true, restores a previous saved hw state.
+ *
+ * This function request for enabling power (regulator) and clock,
+ * and could also restore a previously saved hw state.
+ */
+static int hash_enable_power(
+ struct hash_device_data *device_data,
+ bool restore_device_state)
+{
+ int ret = 0;
+ struct device *dev = device_data->dev;
+
+ spin_lock(&device_data->power_state_lock);
+ if (!device_data->power_state) {
+ ret = ux500_regulator_atomic_enable(device_data->regulator);
+ if (ret) {
+ dev_err(dev, "[%s]: regulator_enable() failed!",
+ __func__);
+ goto out;
+ }
+ ret = clk_enable(device_data->clk);
+ if (ret) {
+ dev_err(dev, "[%s]: clk_enable() failed!",
+ __func__);
+ ret = ux500_regulator_atomic_disable(
+ device_data->regulator);
+ goto out;
+ }
+ device_data->power_state = true;
+ }
+
+ if (device_data->restore_dev_state) {
+ if (restore_device_state) {
+ device_data->restore_dev_state = false;
+ hash_resume_state(device_data,
+ &device_data->current_ctx->state);
+ }
+ }
+out:
+ spin_unlock(&device_data->power_state_lock);
+
+ return ret;
+}
+
+/**
+ * hash_get_device_data - Checks for an available hash device and returns it.
+ * @hash_ctx: Structure for the hash context.
+ * @device_data: Structure for the hash device.
+ *
+ * This function checks for an available hash device and returns it to
+ * the caller.
+ * Note! The caller must release the device by calling up().
+ */
+static int hash_get_device_data(struct hash_ctx *ctx,
+ struct hash_device_data **device_data)
+{
+ int ret;
+ struct klist_iter device_iterator;
+ struct klist_node *device_node;
+ struct hash_device_data *local_device_data = NULL;
+
+ /* Wait until a device is available */
+ ret = down_interruptible(&driver_data.device_allocation);
+ if (ret)
+ return ret; /* Interrupted */
+
+ /* Select a device */
+ klist_iter_init(&driver_data.device_list, &device_iterator);
+ device_node = klist_next(&device_iterator);
+ while (device_node) {
+ local_device_data = container_of(device_node,
+ struct hash_device_data, list_node);
+ spin_lock(&local_device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (local_device_data->current_ctx) {
+ device_node = klist_next(&device_iterator);
+ } else {
+ local_device_data->current_ctx = ctx;
+ ctx->device = local_device_data;
+ spin_unlock(&local_device_data->ctx_lock);
+ break;
+ }
+ spin_unlock(&local_device_data->ctx_lock);
+ }
+ klist_iter_exit(&device_iterator);
+
+ if (!device_node) {
+ /*
+ * No free device found.
+ * Since we reserved a device with down_interruptible(), this
+ * should not be possible.
+ * The number of available devices, tracked by the
+ * device_allocation semaphore, is left decremented by not
+ * calling up(device_allocation).
+ */
+ return -EBUSY;
+ }
+
+ *device_data = local_device_data;
+
+ return 0;
+}
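+
+/*
+ * A minimal usage sketch for the allocation scheme above (illustrative
+ * only; release_hash_device() is the driver helper that is expected to
+ * clear current_ctx and call up() on the allocation semaphore):
+ *
+ *   struct hash_device_data *device_data;
+ *   int ret = hash_get_device_data(ctx, &device_data);
+ *
+ *   if (ret)
+ *       return ret;
+ *   ...exclusive use of the device...
+ *   release_hash_device(device_data);
+ */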
+
+/**
+ * hash_hw_write_key - Writes the key to the hardware registers.
+ *
+ * @device_data: Structure for the hash device.
+ * @key: Key to be written.
+ * @keylen: The length of the key.
+ *
+ * Note! This function DOES NOT write to the NBLW register, even though
+ * this is specified in the hw design spec, either due to incorrect info
+ * in the spec or due to a bug in the hw.
+ */
+static void hash_hw_write_key(struct hash_device_data *device_data,
+ const u8 *key, unsigned int keylen)
+{
+ u32 word = 0;
+ int nwords = 1;
+
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+ while (keylen >= 4) {
+ word = ((u32) (key[3] & 0xff) << 24) |
+ ((u32) (key[2] & 0xff) << 16) |
+ ((u32) (key[1] & 0xff) << 8) |
+ ((u32) (key[0] & 0xff));
+
+ HASH_SET_DIN(&word, nwords);
+ keylen -= 4;
+ key += 4;
+ }
+
+ /* Take care of the remaining bytes in the last word */
+ if (keylen) {
+ word = 0;
+ while (keylen) {
+ word |= (key[keylen - 1] << (8 * (keylen - 1)));
+ keylen--;
+ }
+
+ HASH_SET_DIN(&word, nwords);
+ }
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ HASH_SET_DCAL;
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+}
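+
+/*
+ * Byte-to-word packing example for hash_hw_write_key() (illustrative):
+ * the key bytes are packed little-endian into each 32-bit DIN word, so
+ * key[] = { 0x01, 0x02, 0x03, 0x04, 0x05 } is written as
+ *
+ *   word 0: 0x04030201   (full word from the main loop)
+ *   word 1: 0x00000005   (remaining byte from the tail loop)
+ */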
+
+/**
+ * init_hash_hw - Initialise the hash hardware for a new calculation.
+ * @device_data: Structure for the hash device.
+ * @ctx: The hash context.
+ *
+ * This function will enable the bits needed to clear and start a new
+ * calculation.
+ */
+static int init_hash_hw(struct hash_device_data *device_data,
+ struct hash_ctx *ctx)
+{
+ int ret = 0;
+
+ ret = hash_setconfiguration(device_data, &ctx->config);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_setconfiguration() "
+ "failed!", __func__);
+ return ret;
+ }
+
+ hash_begin(device_data, ctx);
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
+ hash_hw_write_key(device_data, ctx->key, ctx->keylen);
+
+ return ret;
+}
+
+/**
+ * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
+ *
+ * @sg: Scatterlist.
+ * @size: Size in bytes.
+ * @aligned: True if sg data aligned to work in DMA mode.
+ *
+ * Reentrancy: Non Re-entrant
+ */
+static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
+{
+ int nents = 0;
+ bool aligned_data = true;
+
+ while (size > 0 && sg) {
+ nents++;
+ size -= sg->length;
+
+ /* hash_set_dma_transfer will align last nent */
+ if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE))
+ || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) &&
+ size > 0))
+ aligned_data = false;
+
+ sg = sg_next(sg);
+ }
+
+ if (aligned)
+ *aligned = aligned_data;
+
+ if (size != 0)
+ return -EFAULT;
+
+ return nents;
+}
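+
+/*
+ * Example (illustrative): for a scatterlist of three entries of 64, 32
+ * and 20 bytes and size == 116, hash_get_nents() returns 3. *aligned is
+ * set true only if every entry's offset is HASH_DMA_ALIGN_SIZE aligned
+ * and every entry except the last has a HASH_DMA_ALIGN_SIZE aligned
+ * length.
+ */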
+
+/**
+ * hash_dma_valid_data - checks for dma valid sg data.
+ * @sg: Scatterlist.
+ * @datasize: Datasize in bytes.
+ *
+ * NOTE! This function checks for DMA-valid sg data, since the DMA
+ * only accepts data sizes that are a multiple of the word size.
+ */
+static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
+{
+ bool aligned;
+
+ /* Need to include at least one nent, else error */
+ if (hash_get_nents(sg, datasize, &aligned) < 1)
+ return false;
+
+ return aligned;
+}
+
+/**
+ * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ *
+ * Initialize structures.
+ */
+static int hash_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (!ctx->key)
+ ctx->keylen = 0;
+
+ memset(&ctx->state, 0, sizeof(struct hash_state));
+ ctx->updated = 0;
+ if (hash_mode == HASH_MODE_DMA) {
+ if ((ctx->config.oper_mode == HASH_OPER_MODE_HMAC) &&
+ cpu_is_u5500()) {
+ pr_debug(DEV_DBG_NAME " [%s] HMAC and DMA not working "
+ "on u5500, directing to CPU mode.",
+ __func__);
+ ctx->dma_mode = false; /* Don't use DMA in this case */
+ goto out;
+ }
+
+ if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
+ ctx->dma_mode = false; /* Don't use DMA in this case */
+
+ pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct "
+ "to CPU mode for data size < %d",
+ __func__, HASH_DMA_ALIGN_SIZE);
+ } else {
+ if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
+ hash_dma_valid_data(req->src,
+ req->nbytes)) {
+ ctx->dma_mode = true;
+ } else {
+ ctx->dma_mode = false;
+ pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use"
+ " CPU mode for datalength < %d"
+ " or non-aligned data, except "
+ "in last nent", __func__,
+ HASH_DMA_PERFORMANCE_MIN_SIZE);
+ }
+ }
+ }
+out:
+ return 0;
+}
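+
+/*
+ * Summary of the mode selection above (illustrative; the thresholds are
+ * driver constants defined elsewhere in the driver):
+ *
+ *   HMAC on u5500                               -> CPU mode
+ *   nbytes <  HASH_DMA_ALIGN_SIZE               -> CPU mode
+ *   nbytes <  HASH_DMA_PERFORMANCE_MIN_SIZE     -> CPU mode
+ *   aligned sg data and nbytes >= both limits   -> DMA mode
+ */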
+
+/**
+ * hash_processblock - This function processes a single block of 512 bits (64
+ * bytes), word aligned, starting at message.
+ * @device_data: Structure for the hash device.
+ * @message: Block (512 bits) of message to be written to
+ * the HASH hardware.
+ * @length: Message length in bytes (expected to be HASH_BLOCK_SIZE).
+ *
+ * Reentrancy: Non Re-entrant.
+ */
+static void hash_processblock(
+ struct hash_device_data *device_data,
+ const u32 *message, int length)
+{
+ int len = length / HASH_BYTES_PER_WORD;
+ /*
+ * NBLW bits. Reset the number of bits in last word (NBLW).
+ */
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+ /*
+ * Write message data to the HASH_DIN register.
+ */
+ HASH_SET_DIN(message, len);
+}
+
+/**
+ * hash_messagepad - Pads a message and writes the nblw bits.
+ * @device_data: Structure for the hash device.
+ * @message: Last word of a message.
+ * @index_bytes: The number of bytes remaining in the message.
+ *
+ * This function manages the final part of the digest calculation, when less
+ * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
+ *
+ * Reentrancy: Non Re-entrant.
+ */
+static void hash_messagepad(struct hash_device_data *device_data,
+ const u32 *message, u8 index_bytes)
+{
+ int nwords = 1;
+
+ /*
+ * Clear hash str register, only clear NBLW
+ * since DCAL will be reset by hardware.
+ */
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+ /* Main loop */
+ while (index_bytes >= 4) {
+ HASH_SET_DIN(message, nwords);
+ index_bytes -= 4;
+ message++;
+ }
+
+ if (index_bytes)
+ HASH_SET_DIN(message, nwords);
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ /* index_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
+ HASH_SET_NBLW(index_bytes * 8);
+ dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__,
+ readl_relaxed(&device_data->base->din),
+ (int)(readl_relaxed(&device_data->base->str) &
+ HASH_STR_NBLW_MASK));
+ HASH_SET_DCAL;
+ dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d",
+ __func__, readl_relaxed(&device_data->base->din),
+ (int)(readl_relaxed(&device_data->base->str) &
+ HASH_STR_NBLW_MASK));
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+}
+
+/**
+ * hash_incrementlength - Increments the length of the current message.
+ * @ctx: Hash context
+ * @incr: Length of message processed already
+ *
+ * Overflow cannot occur, because conditions for overflow are checked in
+ * hash_hw_update.
+ */
+static void hash_incrementlength(struct hash_ctx *ctx, u32 incr)
+{
+ ctx->state.length.low_word += incr;
+
+ /* Check for wrap-around */
+ if (ctx->state.length.low_word < incr)
+ ctx->state.length.high_word++;
+}
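+
+/*
+ * The message length is kept as a 64-bit value split across two 32-bit
+ * words. Worked example (illustrative): with low_word == 0xffffffc0 and
+ * incr == 0x80, low_word wraps to 0x40 (0x40 < 0x80), so high_word is
+ * incremented, i.e. the total length grows by 128 bytes as expected.
+ */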
+
+/**
+ * hash_setconfiguration - Sets the required configuration for the hash
+ * hardware.
+ * @device_data: Structure for the hash device.
+ * @config: Pointer to a configuration structure.
+ *
+ * Reentrancy: Non Re-entrant
+ * Reentrancy issues:
+ * 1. Global variable registry (configuration register,
+ * parameter register, divider register) is being modified.
+ *
+ * Comments 1. : The user needs to call the hash_begin API after calling
+ * this API, i.e. the current configuration is set only when
+ * bit INIT is set, and we set the INIT bit in hash_begin.
+ * Changing the configuration during a computation has
+ * no effect, so we first set the configuration by calling
+ * this API and then set the INIT bit for the HASH
+ * processor; the current configuration is then taken into
+ * account. As reading the INIT bit (with correct protection
+ * rights) will always return 0b, we can't make a check
+ * at software level. So the user has to initialize the
+ * device for a new configuration to take effect.
+ * 2. The default value of the data format is 00b, i.e. the
+ * format of data entered in the HASH_DIN register is 32-bit
+ * data. The data written in HASH_DIN is used directly by
+ * the HASH processing, without reordering.
+ */
+int hash_setconfiguration(struct hash_device_data *device_data,
+ struct hash_config *config)
+{
+ int ret = 0;
+
+ if (config->algorithm != HASH_ALGO_SHA1 &&
+ config->algorithm != HASH_ALGO_SHA256)
+ return -EPERM;
+
+ /*
+ * DATAFORM bits. Set the data format of the data written to
+ * HASH_DIN according to the requested configuration.
+ */
+ HASH_SET_DATA_FORMAT(config->data_format);
+
+ /*
+ * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
+ */
+ switch (config->algorithm) {
+ case HASH_ALGO_SHA1:
+ HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
+ break;
+
+ case HASH_ALGO_SHA256:
+ HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
+ break;
+
+ default:
+ dev_err(device_data->dev, "[%s] Incorrect algorithm.",
+ __func__);
+ return -EPERM;
+ }
+
+ /*
+ * MODE bit. This bit selects between HASH or HMAC mode for the
+ * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
+ */
+ if (HASH_OPER_MODE_HASH == config->oper_mode)
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_MODE_MASK);
+ else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_MODE_MASK);
+ if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
+ /* Truncate key to blocksize */
+ dev_dbg(device_data->dev, "[%s] LKEY set", __func__);
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_LKEY_MASK);
+ } else {
+ dev_dbg(device_data->dev, "[%s] LKEY cleared",
+ __func__);
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_LKEY_MASK);
+ }
+ } else { /* Wrong hash mode */
+ ret = -EPERM;
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ }
+ return ret;
+}
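+
+/*
+ * A minimal sketch of the required call order described above
+ * (illustrative; this is essentially what init_hash_hw() does):
+ *
+ *   ret = hash_setconfiguration(device_data, &ctx->config);
+ *   if (!ret)
+ *       hash_begin(device_data, ctx);
+ *
+ * hash_begin() sets the INIT bit, which is what makes the configuration
+ * written here take effect.
+ */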
+
+/**
+ * hash_begin - This routine resets some globals and initializes the hash
+ * hardware.
+ * @device_data: Structure for the hash device.
+ * @ctx: Hash context.
+ *
+ * Reentrancy: Non Re-entrant
+ *
+ * Comments 1. : The user needs to call the hash_setconfiguration API
+ * before calling this API, i.e. the current configuration
+ * is set only when bit INIT is set, and we set the INIT
+ * bit in hash_begin. Changing the configuration during a
+ * computation has no effect, so we first set the
+ * configuration by calling that API and then set the
+ * INIT bit for the HASH processor; the current
+ * configuration is then taken into account. As reading
+ * the INIT bit (with correct protection rights) will
+ * always return 0b, we can't make a check at software
+ * level. So the user has to initialize the device for a
+ * new configuration to take effect.
+ */
+void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
+{
+ /* HW and SW initializations */
+ /* Note: there is no need to initialize buffer and digest members */
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ /*
+ * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
+ * prepare the HASH accelerator to compute the message
+ * digest of a new message.
+ */
+ HASH_INITIALIZE;
+
+ /*
+ * NBLW bits. Reset the number of bits in last word (NBLW).
+ */
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+}
+
+int hash_process_data(
+ struct hash_device_data *device_data,
+ struct hash_ctx *ctx, int msg_length, u8 *data_buffer,
+ u8 *buffer, u8 *index)
+{
+ int ret = 0;
+ u32 count;
+
+ do {
+ if ((*index + msg_length) < HASH_BLOCK_SIZE) {
+ for (count = 0; count < msg_length; count++) {
+ buffer[*index + count] =
+ *(data_buffer + count);
+ }
+ *index += msg_length;
+ msg_length = 0;
+ } else {
+ if (ctx->updated) {
+
+ ret = hash_resume_state(device_data,
+ &ctx->state);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_resume_state()"
+ " failed!", __func__);
+ goto out;
+ }
+ } else {
+ ret = init_hash_hw(device_data, ctx);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "init_hash_hw()"
+ " failed!", __func__);
+ goto out;
+ }
+ ctx->updated = 1;
+ }
+ /*
+ * If 'data_buffer' is four byte aligned and
+ * local buffer does not have any data, we can
+ * write data directly from 'data_buffer' to
+ * HW peripheral, otherwise we first copy data
+ * to a local buffer
+ */
+ if ((0 == (((u32)data_buffer) % 4))
+ && (0 == *index))
+ hash_processblock(device_data,
+ (const u32 *)
+ data_buffer, HASH_BLOCK_SIZE);
+ else {
+ for (count = 0; count <
+ (u32)(HASH_BLOCK_SIZE -
+ *index);
+ count++) {
+ buffer[*index + count] =
+ *(data_buffer + count);
+ }
+ hash_processblock(device_data,
+ (const u32 *)buffer,
+ HASH_BLOCK_SIZE);
+ }
+ hash_incrementlength(ctx, HASH_BLOCK_SIZE);
+ data_buffer += (HASH_BLOCK_SIZE - *index);
+
+ msg_length -= (HASH_BLOCK_SIZE - *index);
+ *index = 0;
+
+ ret = hash_save_state(device_data,
+ &ctx->state);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_save_state()"
+ " failed!", __func__);
+ goto out;
+ }
+ }
+ } while (msg_length != 0);
+out:
+
+ return ret;
+}
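+
+/*
+ * Buffering example for hash_process_data() (illustrative, with
+ * HASH_BLOCK_SIZE == 64): given *index == 60 and msg_length == 100, the
+ * first 4 bytes complete the local buffer, which is processed as one
+ * block. The loop then restarts with *index == 0 and 96 bytes left, so
+ * one more full block is processed (directly from data_buffer when it
+ * is word aligned, otherwise via the local buffer) and the final 32
+ * bytes are left buffered for a later update/final call.
+ */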
+
+/**
+ * hash_dma_final - The hash dma final function for SHA1/SHA256.
+ * @req: The hash request for the job.
+ */
+static int hash_dma_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct hash_device_data *device_data;
+ u8 digest[SHA256_DIGEST_SIZE];
+ int bytes_written = 0;
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "hash_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ if (ctx->updated) {
+ ret = hash_resume_state(device_data, &ctx->state);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_resume_state() "
+ "failed!", __func__);
+ goto out_power;
+ }
+
+ }
+
+ if (!ctx->updated) {
+ ret = hash_setconfiguration(device_data, &ctx->config);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_setconfiguration() failed!",
+ __func__);
+ goto out_power;
+ }
+
+ /* Enable DMA input */
+ if (hash_mode != HASH_MODE_DMA || !ctx->dma_mode) {
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_DMAE_MASK);
+ } else {
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_DMAE_MASK);
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_PRIVN_MASK);
+ }
+
+ HASH_INITIALIZE;
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
+ hash_hw_write_key(device_data, ctx->key, ctx->keylen);
+
+ /* Number of bits in last word = (nbytes * 8) % 32 */
+ HASH_SET_NBLW((req->nbytes * 8) % 32);
+ ctx->updated = 1;
+ }
+
+ /* Store the nents in the dma struct. */
+ ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
+ if (!ctx->device->dma.nents) {
+ dev_err(device_data->dev, "[%s] "
+ "ctx->device->dma.nents = 0", __func__);
+ ret = -EFAULT;
+ goto out_power;
+ }
+
+ bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
+ if (bytes_written != req->nbytes) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_dma_write() failed!", __func__);
+ ret = -EIO;
+ goto out_power;
+ }
+
+ wait_for_completion(&ctx->device->dma.complete);
+ hash_dma_done(ctx);
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
+ unsigned int keylen = ctx->keylen;
+ u8 *key = ctx->key;
+
+ dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
+ ctx->keylen);
+ hash_hw_write_key(device_data, key, keylen);
+ }
+
+ hash_get_digest(device_data, digest, ctx->config.algorithm);
+ memcpy(req->result, digest, ctx->digestsize);
+
+out_power:
+ /* Disable power (and clock) */
+ if (hash_disable_power(device_data, false))
+ dev_err(device_data->dev, "[%s] hash_disable_power() failed!",
+ __func__);
+
+out:
+ release_hash_device(device_data);
+
+ /* ctx->key was allocated in setkey and is only used for HMAC. */
+ kfree(ctx->key);
+
+ return ret;
+}
+
+/**
+ * hash_hw_final - The final hash calculation function
+ * @req: The hash request for the job.
+ */
+int hash_hw_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct hash_device_data *device_data;
+ u8 digest[SHA256_DIGEST_SIZE];
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "hash_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ if (ctx->updated) {
+ ret = hash_resume_state(device_data, &ctx->state);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_resume_state() "
+ "failed!", __func__);
+ goto out_power;
+ }
+ } else if (req->nbytes == 0 && ctx->keylen == 0) {
+ u8 zero_hash[SHA256_DIGEST_SIZE];
+ u32 zero_hash_size = 0;
+ bool zero_digest = false;
+ /*
+ * Use a pre-calculated empty-message digest
+ * (workaround, since the hw returns zeroes; hw bug!?).
+ */
+ ret = get_empty_message_digest(device_data, &zero_hash[0],
+ &zero_hash_size, &zero_digest);
+ if (!ret && likely(zero_hash_size == ctx->digestsize) &&
+ zero_digest) {
+ memcpy(req->result, &zero_hash[0], ctx->digestsize);
+ goto out_power;
+ } else if (!ret && !zero_digest) {
+ dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
+ "key, continue...", __func__);
+ } else {
+ dev_err(device_data->dev, "[%s] ret=%d, or wrong "
+ "digest size? %s", __func__, ret,
+ (zero_hash_size == ctx->digestsize) ?
+ "true" : "false");
+ /* Return error */
+ goto out_power;
+ }
+ } else if (req->nbytes == 0 && ctx->keylen > 0) {
+ ret = -EPERM;
+ dev_err(device_data->dev, "[%s] Empty message with "
+ "keylength > 0, NOT supported.", __func__);
+ goto out_power;
+ }
+
+ if (!ctx->updated) {
+ ret = init_hash_hw(device_data, ctx);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] init_hash_hw() "
+ "failed!", __func__);
+ goto out_power;
+ }
+ }
+
+ if (ctx->state.index) {
+ hash_messagepad(device_data, ctx->state.buffer,
+ ctx->state.index);
+ } else {
+ HASH_SET_DCAL;
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+ }
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
+ unsigned int keylen = ctx->keylen;
+ u8 *key = ctx->key;
+
+ dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
+ ctx->keylen);
+ hash_hw_write_key(device_data, key, keylen);
+ }
+
+ hash_get_digest(device_data, digest, ctx->config.algorithm);
+ memcpy(req->result, digest, ctx->digestsize);
+
+out_power:
+ /* Disable power (and clock) */
+ if (hash_disable_power(device_data, false))
+ dev_err(device_data->dev, "[%s] hash_disable_power() failed!",
+ __func__);
+
+out:
+ release_hash_device(device_data);
+
+ /* ctx->key was allocated in setkey and is only used for HMAC. */
+ kfree(ctx->key);
+
+ return ret;
+}
+
+/**
+ * hash_hw_update - Updates the current HASH computation by hashing another
+ * part of the message.
+ * @req: The hash request for the job; the message data to be hashed is
+ * in req->src (caller allocated).
+ *
+ * Reentrancy: Non Re-entrant
+ */
+int hash_hw_update(struct ahash_request *req)
+{
+ int ret = 0;
+ u8 index = 0;
+ u8 *buffer;
+ struct hash_device_data *device_data;
+ u8 *data_buffer;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct crypto_hash_walk walk;
+ int msg_length = crypto_hash_walk_first(req, &walk);
+
+ /* Empty message ("") is correct indata */
+ if (msg_length == 0)
+ return ret;
+
+ index = ctx->state.index;
+ buffer = (u8 *)ctx->state.buffer;
+
+ /* Check if ctx->state.length + msg_length overflows */
+ if (msg_length > (ctx->state.length.low_word + msg_length) &&
+ HASH_HIGH_WORD_MAX_VAL ==
+ ctx->state.length.high_word) {
+ pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!",
+ __func__);
+ return -EPERM;
+ }
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(device_data->dev, "[%s]: "
+ "hash_enable_power() failed!", __func__);
+ goto out;
+ }
+
+ /* Main loop */
+ while (0 != msg_length) {
+ data_buffer = walk.data;
+ ret = hash_process_data(device_data, ctx,
+ msg_length, data_buffer, buffer, &index);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_internal_hw_"
+ "update() failed!", __func__);
+ goto out_power;
+ }
+
+ msg_length = crypto_hash_walk_done(&walk, 0);
+ }
+
+ ctx->state.index = index;
+ dev_dbg(device_data->dev, "[%s] indata length=%d, "
+ "bin=%d))", __func__, ctx->state.index, ctx->state.bit_index);
+
+out_power:
+ /* Disable power (and clock) */
+ if (hash_disable_power(device_data, false))
+ dev_err(device_data->dev, "[%s]: "
+ "hash_disable_power() failed!", __func__);
+out:
+ release_hash_device(device_data);
+
+ return ret;
+}
+
+/**
+ * hash_resume_state - Function that resumes the state of a calculation.
+ * @device_data: Pointer to the device structure.
+ * @device_state: The state to be restored in the hash hardware
+ *
+ * Reentrancy: Non Re-entrant
+ */
+int hash_resume_state(struct hash_device_data *device_data,
+ const struct hash_state *device_state)
+{
+ u32 temp_cr;
+ s32 count;
+ int hash_mode = HASH_OPER_MODE_HASH;
+
+ if (NULL == device_state) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -EPERM;
+ }
+
+ /* Check correctness of index and length members */
+ if (device_state->index > HASH_BLOCK_SIZE
+ || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -EPERM;
+ }
+
+ /*
+ * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
+ * prepare the HASH accelerator to compute the message
+ * digest of a new message.
+ */
+ HASH_INITIALIZE;
+
+ temp_cr = device_state->temp_cr;
+ writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
+
+ if (device_data->base->cr & HASH_CR_MODE_MASK)
+ hash_mode = HASH_OPER_MODE_HMAC;
+ else
+ hash_mode = HASH_OPER_MODE_HASH;
+
+ for (count = 0; count < HASH_CSR_COUNT; count++) {
+ if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
+ break;
+
+ writel_relaxed(device_state->csr[count],
+ &device_data->base->csrx[count]);
+ }
+
+ writel_relaxed(device_state->csfull, &device_data->base->csfull);
+ writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
+
+ writel_relaxed(device_state->str_reg, &device_data->base->str);
+ writel_relaxed(temp_cr, &device_data->base->cr);
+
+ return 0;
+}
+
+/**
+ * hash_save_state - Function that saves the state of hardware.
+ * @device_data: Pointer to the device structure.
+ * @device_state: The structure where the hardware state should be saved.
+ *
+ * Reentrancy: Non Re-entrant
+ */
+int hash_save_state(struct hash_device_data *device_data,
+ struct hash_state *device_state)
+{
+ u32 temp_cr;
+ u32 count;
+ int hash_mode = HASH_OPER_MODE_HASH;
+
+ if (NULL == device_state) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -EPERM;
+ }
+
+ /*
+ * Make sure that there is no ongoing digest calculation in the
+ * hardware before saving its state.
+ */
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ temp_cr = readl_relaxed(&device_data->base->cr);
+
+ device_state->str_reg = readl_relaxed(&device_data->base->str);
+
+ device_state->din_reg = readl_relaxed(&device_data->base->din);
+
+ if (device_data->base->cr & HASH_CR_MODE_MASK)
+ hash_mode = HASH_OPER_MODE_HMAC;
+ else
+ hash_mode = HASH_OPER_MODE_HASH;
+
+ for (count = 0; count < HASH_CSR_COUNT; count++) {
+ if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
+ break;
+
+ device_state->csr[count] =
+ readl_relaxed(&device_data->base->csrx[count]);
+ }
+
+ device_state->csfull = readl_relaxed(&device_data->base->csfull);
+ device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
+
+ device_state->temp_cr = temp_cr;
+
+ return 0;
+}
+
+/**
+ * hash_check_hw - This routine checks the peripheral and PCell IDs.
+ * @device_data: Structure for the hash device.
+ *
+ */
+int hash_check_hw(struct hash_device_data *device_data)
+{
+ int ret = 0;
+
+ if (NULL == device_data) {
+ ret = -EPERM;
+ pr_err(DEV_DBG_NAME " [%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ goto out;
+ }
+
+ /* Checking Peripheral Ids */
+ if ((HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0))
+ && (HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1))
+ && (HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2))
+ && (HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3))
+ && (HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0))
+ && (HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1))
+ && (HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2))
+ && (HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3))
+ ) {
+ ret = 0;
+ goto out;
+ } else {
+ ret = -EPERM;
+ dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!",
+ __func__);
+ goto out;
+ }
+out:
+ return ret;
+}
+
+/**
+ * hash_get_digest - Gets the digest.
+ * @device_data: Pointer to the device structure.
+ * @digest: User allocated byte array for the calculated digest.
+ * @algorithm: The algorithm in use.
+ *
+ * Reentrancy: Non Re-entrant, global variable registry (hash control register)
+ * is being modified.
+ *
+ * Note that, if this is called before the final message has been handled,
+ * it will return the intermediate message digest.
+ */
+void hash_get_digest(struct hash_device_data *device_data,
+ u8 *digest, int algorithm)
+{
+ u32 temp_hx_val, count;
+ int loop_ctr;
+
+ if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
+ dev_err(device_data->dev, "[%s] Incorrect algorithm %d",
+ __func__, algorithm);
+ return;
+ }
+
+ if (algorithm == HASH_ALGO_SHA1)
+ loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
+ else
+ loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
+
+ dev_dbg(device_data->dev, "[%s] digest array:(0x%x)",
+ __func__, (u32) digest);
+
+ /* Copy result into digest array */
+ for (count = 0; count < loop_ctr; count++) {
+ temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
+ digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
+ digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
+ digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
+ digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
+ }
+}
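+
+/*
+ * Word-to-byte unpacking example for hash_get_digest() (illustrative):
+ * each HX register is written out big-endian, so a register value of
+ * 0xda39a3ee becomes digest[0..3] = { 0xda, 0x39, 0xa3, 0xee }, which
+ * matches the conventional digest byte order.
+ */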
+
+/**
+ * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ */
+static int ahash_update(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (hash_mode != HASH_MODE_DMA || !ctx->dma_mode)
+ ret = hash_hw_update(req);
+ /* Skip update for DMA, all data will be passed to DMA in final */
+
+ if (ret) {
+ pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!",
+ __func__);
+ }
+
+ return ret;
+}
+
+/**
+ * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ */
+static int ahash_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);
+
+ if ((hash_mode == HASH_MODE_DMA) && ctx->dma_mode)
+ ret = hash_dma_final(req);
+ else
+ ret = hash_hw_final(req);
+
+ if (ret) {
+ pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed",
+ __func__);
+ }
+
+ return ret;
+}
+
+static int hash_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen, int alg)
+{
+ int ret = 0;
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Freed in final. */
+ ctx->key = kmalloc(keylen, GFP_KERNEL);
+ if (!ctx->key) {
+ pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
+ "for %d\n", __func__, alg);
+ return -ENOMEM;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return ret;
+}
+
+static int ahash_sha1_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA1;
+ ctx->config.oper_mode = HASH_OPER_MODE_HASH;
+ ctx->digestsize = SHA1_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int ahash_sha256_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA256;
+ ctx->config.oper_mode = HASH_OPER_MODE_HASH;
+ ctx->digestsize = SHA256_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int ahash_sha1_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = ahash_sha1_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int ahash_sha256_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = ahash_sha256_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha1_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA1;
+ ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
+ ctx->digestsize = SHA1_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int hmac_sha256_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA256;
+ ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
+ ctx->digestsize = SHA256_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int hmac_sha1_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = hmac_sha1_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha256_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = hmac_sha256_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha1_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
+}
+
+static int hmac_sha256_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
+}
+
+static struct ahash_alg ahash_sha1_alg = {
+ .init = ahash_sha1_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = ahash_sha1_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct ahash_alg ahash_sha256_alg = {
+ .init = ahash_sha256_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = ahash_sha256_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct ahash_alg hmac_sha1_alg = {
+ .init = hmac_sha1_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = hmac_sha1_digest,
+ .setkey = hmac_sha1_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac-sha1-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct ahash_alg hmac_sha256_alg = {
+ .init = hmac_sha256_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = hmac_sha256_digest,
+ .setkey = hmac_sha256_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac-sha256-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/**
+ * ux500_ahash_algs[] - The ahash algorithms supported by this driver.
+ */
+static struct ahash_alg *ux500_ahash_algs[] = {
+ &ahash_sha1_alg,
+ &ahash_sha256_alg,
+ &hmac_sha1_alg,
+ &hmac_sha256_alg
+};
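+
+/*
+ * Once registered, these transforms are reachable through the generic
+ * kernel crypto API. A minimal caller sketch (illustrative; error
+ * handling and async completion omitted):
+ *
+ *   struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
+ *   struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *
+ *   ahash_request_set_crypt(req, sg, result, nbytes);
+ *   crypto_ahash_digest(req);
+ *
+ *   ahash_request_free(req);
+ *   crypto_free_ahash(tfm);
+ */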
+
+/**
+ * ahash_algs_register_all - Registers all supported ahash algorithms.
+ */
+static int ahash_algs_register_all(struct hash_device_data *device_data)
+{
+ int ret;
+ int i;
+ int count;
+
+ for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++) {
+ ret = crypto_register_ahash(ux500_ahash_algs[i]);
+ if (ret) {
+ count = i;
+ dev_err(device_data->dev, "[%s] alg registration"
+ " failed",
+ ux500_ahash_algs[i]->halg.base.cra_driver_name);
+ goto unreg;
+ }
+ }
+ return 0;
+unreg:
+ for (i = 0; i < count; i++)
+ crypto_unregister_ahash(ux500_ahash_algs[i]);
+ return ret;
+}
+
+/**
+ * ahash_algs_unregister_all - Unregisters all supported ahash algorithms.
+ */
+static void ahash_algs_unregister_all(struct hash_device_data *device_data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ux500_ahash_algs); i++)
+ crypto_unregister_ahash(ux500_ahash_algs[i]);
+}
+
+/**
+ * ux500_hash_probe - Function that probes the hash hardware.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res = NULL;
+ struct hash_device_data *device_data;
+ struct device *dev = &pdev->dev;
+
+ device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC);
+ if (!device_data) {
+ dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ device_data->dev = dev;
+ device_data->current_ctx = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__);
+ ret = -ENODEV;
+ goto out_kfree;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (res == NULL) {
+ dev_dbg(dev, "[%s] request_mem_region() failed!", __func__);
+ ret = -EBUSY;
+ goto out_kfree;
+ }
+
+ device_data->base = ioremap(res->start, resource_size(res));
+ if (!device_data->base) {
+ dev_err(dev, "[%s] ioremap() failed!",
+ __func__);
+ ret = -ENOMEM;
+ goto out_free_mem;
+ }
+ spin_lock_init(&device_data->ctx_lock);
+ spin_lock_init(&device_data->power_state_lock);
+
+ /* Enable power for HASH1 hardware block */
+ device_data->regulator = ux500_regulator_get(dev);
+ if (IS_ERR(device_data->regulator)) {
+ dev_err(dev, "[%s] regulator_get() failed!", __func__);
+ ret = PTR_ERR(device_data->regulator);
+ device_data->regulator = NULL;
+ goto out_unmap;
+ }
+
+ /* Enable the clock for HASH1 hardware block */
+ device_data->clk = clk_get(dev, NULL);
+ if (IS_ERR(device_data->clk)) {
+ dev_err(dev, "[%s] clk_get() failed!", __func__);
+ ret = PTR_ERR(device_data->clk);
+ goto out_regulator;
+ }
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
+ goto out_clk;
+ }
+
+ ret = hash_check_hw(device_data);
+ if (ret) {
+ dev_err(dev, "[%s] hash_check_hw() failed!", __func__);
+ goto out_power;
+ }
+
+ if (hash_mode == HASH_MODE_DMA)
+ hash_dma_setup_channel(device_data, dev);
+
+ platform_set_drvdata(pdev, device_data);
+
+ /* Put the new device into the device list... */
+ klist_add_tail(&device_data->list_node, &driver_data.device_list);
+ /* ... and signal that a new device is available. */
+ up(&driver_data.device_allocation);
+
+ ret = ahash_algs_register_all(device_data);
+ if (ret) {
+ dev_err(dev, "[%s] ahash_algs_register_all() "
+ "failed!", __func__);
+ goto out_power;
+ }
+
+ if (hash_disable_power(device_data, false))
+ dev_err(dev, "[%s]: hash_disable_power() failed!", __func__);
+
+ dev_info(dev, "[%s] successfully probed\n", __func__);
+ return 0;
+
+out_power:
+ hash_disable_power(device_data, false);
+
+out_clk:
+ clk_put(device_data->clk);
+
+out_regulator:
+ ux500_regulator_put(device_data->regulator);
+
+out_unmap:
+ iounmap(device_data->base);
+
+out_free_mem:
+ release_mem_region(res->start, resource_size(res));
+
+out_kfree:
+ kfree(device_data);
+out:
+ return ret;
+}
+
+/**
+ * ux500_hash_remove - Function that removes the hash device from the platform.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct hash_device_data *device_data;
+ struct device *dev = &pdev->dev;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Try to decrease the number of available devices. */
+ if (down_trylock(&driver_data.device_allocation))
+ return -EBUSY;
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (device_data->current_ctx) {
+ /* The device is busy */
+ spin_unlock(&device_data->ctx_lock);
+ /* Return the device to the pool. */
+ up(&driver_data.device_allocation);
+ return -EBUSY;
+ }
+
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ ahash_algs_unregister_all(device_data);
+
+ if (hash_disable_power(device_data, false))
+ dev_err(dev, "[%s]: hash_disable_power() failed",
+ __func__);
+
+ clk_put(device_data->clk);
+ ux500_regulator_put(device_data->regulator);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(device_data);
+
+ return 0;
+}
+
+/**
+ * ux500_hash_shutdown - Function that shutdown the hash device.
+ * @pdev: The platform device
+ */
+static void ux500_hash_shutdown(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ struct hash_device_data *device_data;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return;
+ }
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (!device_data->current_ctx) {
+ if (down_trylock(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: Cryp still in use!"
+ "Shutting down anyway...", __func__);
+ /*
+ * (Allocate the device.)
+ * current_ctx needs to be set to a non-null (dummy) value
+ * here, so that the device is not used again after a
+ * context switch.
+ */
+ device_data->current_ctx++;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ ahash_algs_unregister_all(device_data);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ if (hash_disable_power(device_data, false))
+ dev_err(&pdev->dev, "[%s] hash_disable_power() failed",
+ __func__);
+}
+
+/**
+ * ux500_hash_suspend - Function that suspends the hash device.
+ * @pdev: The platform device.
+ * @state: The power management message (suspend state).
+ */
+static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret;
+ struct hash_device_data *device_data;
+ struct hash_ctx *temp_ctx = NULL;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock(&device_data->ctx_lock);
+ if (!device_data->current_ctx)
+ device_data->current_ctx++;
+ spin_unlock(&device_data->ctx_lock);
+
+ /* The dummy (NULL + 1) value in current_ctx means the device was free */
+ if (device_data->current_ctx == ++temp_ctx) {
+ if (down_interruptible(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
+ "failed", __func__);
+ ret = hash_disable_power(device_data, false);
+
+ } else
+ ret = hash_disable_power(device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__);
+
+ return ret;
+}
+
+/**
+ * ux500_hash_resume - Function that resumes the hash device.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_resume(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct hash_device_data *device_data;
+ struct hash_ctx *temp_ctx = NULL;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock(&device_data->ctx_lock);
+ /* Clear the dummy (NULL + 1) value set by suspend */
+ if (device_data->current_ctx == ++temp_ctx)
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ if (!device_data->current_ctx)
+ up(&driver_data.device_allocation);
+ else
+ ret = hash_enable_power(device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!",
+ __func__);
+
+ return ret;
+}
+
+static struct platform_driver hash_driver = {
+ .probe = ux500_hash_probe,
+ .remove = ux500_hash_remove,
+ .shutdown = ux500_hash_shutdown,
+ .suspend = ux500_hash_suspend,
+ .resume = ux500_hash_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hash1",
+ }
+};
+
+/**
+ * ux500_hash_mod_init - The kernel module init function.
+ */
+static int __init ux500_hash_mod_init(void)
+{
+ klist_init(&driver_data.device_list, NULL, NULL);
+ /* Initialize the semaphore to 0 devices (locked state) */
+ sema_init(&driver_data.device_allocation, 0);
+
+ return platform_driver_register(&hash_driver);
+}
+
+/**
+ * ux500_hash_mod_fini - The kernel module exit function.
+ */
+static void __exit ux500_hash_mod_fini(void)
+{
+ platform_driver_unregister(&hash_driver);
+ return;
+}
+
+module_init(ux500_hash_mod_init);
+module_exit(ux500_hash_mod_fini);
+
+MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("sha1-all");
+MODULE_ALIAS("sha256-all");
+MODULE_ALIAS("hmac-sha1-all");
+MODULE_ALIAS("hmac-sha256-all");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2ed1ac3513f..0fe43c3f598 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -345,6 +345,7 @@ struct d40_base {
int irq;
int num_phy_chans;
int num_log_chans;
+ struct device_dma_parameters dma_parms;
struct dma_device dma_both;
struct dma_device dma_slave;
struct dma_device dma_memcpy;
@@ -2577,6 +2578,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
return -EINVAL;
}
+ if (src_maxburst > 16) {
+ src_maxburst = 16;
+ dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
+ } else if (dst_maxburst > 16) {
+ dst_maxburst = 16;
+ src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
+ }
+
ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
src_addr_width,
src_maxburst);
@@ -2639,6 +2648,56 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return -ENXIO;
}
+dma_addr_t stedma40_get_src_addr(struct dma_chan *chan)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ dma_addr_t addr;
+
+ if (chan_is_physical(d40c))
+ addr = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSPTR);
+ else {
+ unsigned long lower;
+ unsigned long upper;
+
+ /*
+ * There is a potential for overflow between the time the two
+ * halves of the pointer are read.
+ */
+ lower = d40c->lcpa->lcsp0 & D40_MEM_LCSP0_SPTR_MASK;
+ upper = d40c->lcpa->lcsp1 & D40_MEM_LCSP1_SPTR_MASK;
+
+ addr = upper | lower;
+ }
+
+ return addr;
+}
+EXPORT_SYMBOL(stedma40_get_src_addr);
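+
+/*
+ * For a logical channel the 32-bit source address is split across two
+ * LCSP words: bits 15..0 are kept in lcsp0 and bits 31..16 in lcsp1
+ * (by analogy with the DPTR field definitions in ste_dma40_ll.h).
+ * Illustrative example: lcsp0 & SPTR_MASK == 0x00001000 and
+ * lcsp1 & SPTR_MASK == 0x80110000 combine to the address 0x80111000.
+ */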
+
+dma_addr_t stedma40_get_dst_addr(struct dma_chan *chan)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ dma_addr_t addr;
+
+ if (chan_is_physical(d40c))
+ addr = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDPTR);
+ else {
+ unsigned long lower;
+ unsigned long upper;
+
+ lower = d40c->lcpa->lcsp2 & D40_MEM_LCSP2_DPTR_MASK;
+ upper = d40c->lcpa->lcsp3 & D40_MEM_LCSP3_DPTR_MASK;
+
+ addr = upper | lower;
+ }
+
+ return addr;
+}
+EXPORT_SYMBOL(stedma40_get_dst_addr);
+
/* Initialization functions */
static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
@@ -2773,8 +2832,6 @@ static int dma40_pm_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct d40_base *base = platform_get_drvdata(pdev);
int ret = 0;
- if (!pm_runtime_suspended(dev))
- return -EBUSY;
if (base->lcpa_regulator)
ret = regulator_disable(base->lcpa_regulator);
@@ -3358,6 +3415,13 @@ static int __init d40_probe(struct platform_device *pdev)
if (err)
goto failure;
+ base->dev->dma_parms = &base->dma_parms;
+ err = dma_set_max_seg_size(base->dev, 0xffff);
+ if (err) {
+ d40_err(&pdev->dev, "Failed to set dma max seg size\n");
+ goto failure;
+ }
+
d40_hw_init(base);
dev_info(base->dev, "initialized\n");
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index cad9e1daedf..d47d1fa36b9 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -102,16 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
+ /* Set the priority bit to high for the physical channel */
+ if (cfg->high_priority) {
+ src |= 1 << D40_SREG_CFG_PRI_POS;
+ dst |= 1 << D40_SREG_CFG_PRI_POS;
+ }
+
} else {
/* Logical channel */
dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
}
- if (cfg->high_priority) {
- src |= 1 << D40_SREG_CFG_PRI_POS;
- dst |= 1 << D40_SREG_CFG_PRI_POS;
- }
if (cfg->src_info.big_endian)
src |= 1 << D40_SREG_CFG_LBE_POS;
@@ -331,10 +333,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcpa[0].lcsp0);
- writel(lli_src->lcsp13, &lcpa[0].lcsp1);
- writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
- writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
+ writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
+ writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
+ writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
+ writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
}
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
@@ -344,10 +346,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
{
d40_log_lli_link(lli_dst, lli_src, next, flags);
- writel(lli_src->lcsp02, &lcla[0].lcsp02);
- writel(lli_src->lcsp13, &lcla[0].lcsp13);
- writel(lli_dst->lcsp02, &lcla[1].lcsp02);
- writel(lli_dst->lcsp13, &lcla[1].lcsp13);
+ writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
+ writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
+ writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
+ writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
}
static void d40_log_fill_lli(struct d40_log_lli *lli,
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 51e8e5396e9..2f3395d6850 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -94,10 +94,13 @@
/* LCSP2 */
#define D40_MEM_LCSP2_ECNT_POS 16
+#define D40_MEM_LCSP2_DPTR_POS 0
#define D40_MEM_LCSP2_ECNT_MASK (0xFFFF << D40_MEM_LCSP2_ECNT_POS)
+#define D40_MEM_LCSP2_DPTR_MASK (0xFFFF << D40_MEM_LCSP2_DPTR_POS)
/* LCSP3 */
+#define D40_MEM_LCSP3_DPTR_POS 16
#define D40_MEM_LCSP3_DCFG_MST_POS 15
#define D40_MEM_LCSP3_DCFG_TIM_POS 14
#define D40_MEM_LCSP3_DCFG_EIM_POS 13
@@ -107,6 +110,7 @@
#define D40_MEM_LCSP3_DLOS_POS 1
#define D40_MEM_LCSP3_DTCP_POS 0
+#define D40_MEM_LCSP3_DPTR_MASK (0xFFFF << D40_MEM_LCSP3_DPTR_POS)
#define D40_MEM_LCSP3_DLOS_MASK (0x7F << D40_MEM_LCSP3_DLOS_POS)
#define D40_MEM_LCSP3_DTCP_MASK (0x1 << D40_MEM_LCSP3_DTCP_POS)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index e03653d6935..6226d0cd30a 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -504,7 +504,7 @@ config GPIO_JANZ_TTL
config GPIO_AB8500
bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
- depends on AB8500_CORE && BROKEN
+ depends on AB8500_CORE
help
Select this to enable the AB8500 IC GPIO driver
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c
index 050c05d9189..ef75984486c 100644
--- a/drivers/gpio/gpio-ab8500.c
+++ b/drivers/gpio/gpio-ab8500.c
@@ -18,9 +18,16 @@
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500/gpio.h>
+#include <linux/mfd/abx500/ab8500-gpio.h>
+
+
+/*
+ * The AB9540 GPIO support is an extended version of the
+ * AB8500 GPIO support. The AB9540 supports an additional
+ * (7th) register so that more GPIO may be configured and
+ * used.
+ */
/*
* GPIO registers offset
@@ -32,6 +39,7 @@
#define AB8500_GPIO_SEL4_REG 0x03
#define AB8500_GPIO_SEL5_REG 0x04
#define AB8500_GPIO_SEL6_REG 0x05
+#define AB9540_GPIO_SEL7_REG 0x06
#define AB8500_GPIO_DIR1_REG 0x10
#define AB8500_GPIO_DIR2_REG 0x11
@@ -39,6 +47,7 @@
#define AB8500_GPIO_DIR4_REG 0x13
#define AB8500_GPIO_DIR5_REG 0x14
#define AB8500_GPIO_DIR6_REG 0x15
+#define AB9540_GPIO_DIR7_REG 0x16
#define AB8500_GPIO_OUT1_REG 0x20
#define AB8500_GPIO_OUT2_REG 0x21
@@ -46,6 +55,7 @@
#define AB8500_GPIO_OUT4_REG 0x23
#define AB8500_GPIO_OUT5_REG 0x24
#define AB8500_GPIO_OUT6_REG 0x25
+#define AB9540_GPIO_OUT7_REG 0x26
#define AB8500_GPIO_PUD1_REG 0x30
#define AB8500_GPIO_PUD2_REG 0x31
@@ -53,6 +63,7 @@
#define AB8500_GPIO_PUD4_REG 0x33
#define AB8500_GPIO_PUD5_REG 0x34
#define AB8500_GPIO_PUD6_REG 0x35
+#define AB9540_GPIO_PUD7_REG 0x36
#define AB8500_GPIO_IN1_REG 0x40
#define AB8500_GPIO_IN2_REG 0x41
@@ -60,9 +71,13 @@
#define AB8500_GPIO_IN4_REG 0x43
#define AB8500_GPIO_IN5_REG 0x44
#define AB8500_GPIO_IN6_REG 0x45
-#define AB8500_GPIO_ALTFUN_REG 0x45
-#define ALTFUN_REG_INDEX 6
+#define AB9540_GPIO_IN7_REG 0x46
+#define AB8500_GPIO_ALTFUN_REG 0x50
+#define AB8500_ALTFUN_REG_INDEX 6
+#define AB9540_ALTFUN_REG_INDEX 7
#define AB8500_NUM_GPIO 42
+#define AB9540_NUM_GPIO 54
+#define AB8505_NUM_GPIO 53
#define AB8500_NUM_VIR_GPIO_IRQ 16
enum ab8500_gpio_action {
@@ -73,6 +88,11 @@ enum ab8500_gpio_action {
UNMASK
};
+struct ab8500_gpio_irq_cluster {
+ int start;
+ int end;
+};
+
struct ab8500_gpio {
struct gpio_chip chip;
struct ab8500 *parent;
@@ -82,7 +102,50 @@ struct ab8500_gpio {
enum ab8500_gpio_action irq_action;
u16 rising;
u16 falling;
+ struct ab8500_gpio_irq_cluster *irq_cluster;
+ int irq_cluster_size;
+};
+
+/*
+ * Only some GPIOs are interrupt capable, and they are
+ * organized in discontiguous clusters:
+ *
+ * GPIO6 to GPIO13
+ * GPIO24 and GPIO25
+ * GPIO36 to GPIO41
+ * GPIO50 to GPIO54 (AB9540 only)
+ */
+static struct ab8500_gpio_irq_cluster ab8500_irq_clusters[] = {
+ {.start = 5, .end = 12}, /* GPIO numbers start from 1 */
+ {.start = 23, .end = 24},
+ {.start = 35, .end = 40},
+};
+
+static struct ab8500_gpio_irq_cluster ab9540_irq_clusters[] = {
+ {.start = 5, .end = 12}, /* GPIO numbers start from 1 */
+ {.start = 23, .end = 24},
+ {.start = 35, .end = 40},
+ {.start = 49, .end = 53},
+};
+
+/*
+ * For AB8505 Only some GPIOs are interrupt capable, and they are
+ * organized in discontiguous clusters:
+ *
+ * GPIO10 to GPIO11
+ * GPIO13
+ * GPIO40 and GPIO41
+ * GPIO50
+ * GPIO52 to GPIO53
+ */
+static struct ab8500_gpio_irq_cluster ab8505_irq_clusters[] = {
+ {.start = 9, .end = 10}, /* GPIO numbers start from 1 */
+ {.start = 12, .end = 12},
+ {.start = 39, .end = 40},
+ {.start = 49, .end = 49},
+ {.start = 51, .end = 52},
};
+
/**
* to_ab8500_gpio() - get the pointer to ab8500_gpio
* @chip: Member of the structure ab8500_gpio
@@ -115,7 +178,7 @@ static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
u8 mask = 1 << (offset % 8);
- u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8);
+ u8 reg = AB8500_GPIO_IN1_REG + (offset / 8);
int ret;
u8 data;
ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
@@ -132,7 +195,7 @@ static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
int ret;
/* Write the data */
- ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, 1);
+ ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
if (ret < 0)
dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
}
@@ -162,28 +225,13 @@ static int ab8500_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- /*
- * Only some GPIOs are interrupt capable, and they are
- * organized in discontiguous clusters:
- *
- * GPIO6 to GPIO13
- * GPIO24 and GPIO25
- * GPIO36 to GPIO41
- */
- static struct ab8500_gpio_irq_cluster {
- int start;
- int end;
- } clusters[] = {
- {.start = 6, .end = 13},
- {.start = 24, .end = 25},
- {.start = 36, .end = 41},
- };
struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
int base = ab8500_gpio->irq_base;
int i;
- for (i = 0; i < ARRAY_SIZE(clusters); i++) {
- struct ab8500_gpio_irq_cluster *cluster = &clusters[i];
+ for (i = 0; i < ab8500_gpio->irq_cluster_size; i++) {
+ struct ab8500_gpio_irq_cluster *cluster =
+ &ab8500_gpio->irq_cluster[i];
if (offset >= cluster->start && offset <= cluster->end)
return base + offset - cluster->start;
@@ -207,7 +255,7 @@ static struct gpio_chip ab8500gpio_chip = {
static unsigned int irq_to_rising(unsigned int irq)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_get_chip_data(irq);
int offset = irq - ab8500_gpio->irq_base;
int new_irq = offset + AB8500_INT_GPIO6R
+ ab8500_gpio->parent->irq_base;
@@ -216,7 +264,7 @@ static unsigned int irq_to_rising(unsigned int irq)
static unsigned int irq_to_falling(unsigned int irq)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_get_chip_data(irq);
int offset = irq - ab8500_gpio->irq_base;
int new_irq = offset + AB8500_INT_GPIO6F
+ ab8500_gpio->parent->irq_base;
@@ -261,15 +309,16 @@ static irqreturn_t handle_falling(int irq, void *dev)
return IRQ_HANDLED;
}
-static void ab8500_gpio_irq_lock(unsigned int irq)
+static void ab8500_gpio_irq_lock(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
mutex_lock(&ab8500_gpio->lock);
}
-static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
+static void ab8500_gpio_irq_sync_unlock(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
+ unsigned int irq = data->irq;
int offset = irq - ab8500_gpio->irq_base;
bool rising = ab8500_gpio->rising & BIT(offset);
bool falling = ab8500_gpio->falling & BIT(offset);
@@ -280,12 +329,12 @@ static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
if (rising)
ret = request_threaded_irq(irq_to_rising(irq),
NULL, handle_rising,
- IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
"ab8500-gpio-r", ab8500_gpio);
if (falling)
ret = request_threaded_irq(irq_to_falling(irq),
NULL, handle_falling,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND,
"ab8500-gpio-f", ab8500_gpio);
break;
case SHUTDOWN:
@@ -316,21 +365,22 @@ static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
}
-static void ab8500_gpio_irq_mask(unsigned int irq)
+static void ab8500_gpio_irq_mask(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = MASK;
}
-static void ab8500_gpio_irq_unmask(unsigned int irq)
+static void ab8500_gpio_irq_unmask(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = UNMASK;
}
-static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int ab8500_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
+ unsigned int irq = data->irq;
int offset = irq - ab8500_gpio->irq_base;
if (type == IRQ_TYPE_EDGE_BOTH) {
@@ -344,28 +394,28 @@ static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
return 0;
}
-unsigned int ab8500_gpio_irq_startup(unsigned int irq)
+unsigned int ab8500_gpio_irq_startup(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = STARTUP;
return 0;
}
-void ab8500_gpio_irq_shutdown(unsigned int irq)
+void ab8500_gpio_irq_shutdown(struct irq_data *data)
{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
+ struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data);
ab8500_gpio->irq_action = SHUTDOWN;
}
static struct irq_chip ab8500_gpio_irq_chip = {
.name = "ab8500-gpio",
- .startup = ab8500_gpio_irq_startup,
- .shutdown = ab8500_gpio_irq_shutdown,
- .bus_lock = ab8500_gpio_irq_lock,
- .bus_sync_unlock = ab8500_gpio_irq_sync_unlock,
- .mask = ab8500_gpio_irq_mask,
- .unmask = ab8500_gpio_irq_unmask,
- .set_type = ab8500_gpio_irq_set_type,
+ .irq_startup = ab8500_gpio_irq_startup,
+ .irq_shutdown = ab8500_gpio_irq_shutdown,
+ .irq_bus_lock = ab8500_gpio_irq_lock,
+ .irq_bus_sync_unlock = ab8500_gpio_irq_sync_unlock,
+ .irq_mask = ab8500_gpio_irq_mask,
+ .irq_unmask = ab8500_gpio_irq_unmask,
+ .irq_set_type = ab8500_gpio_irq_set_type,
};
static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
@@ -374,14 +424,14 @@ static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
int irq;
for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) {
- set_irq_chip_data(irq, ab8500_gpio);
- set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip,
+ irq_set_chip_data(irq, ab8500_gpio);
+ irq_set_chip_and_handler(irq, &ab8500_gpio_irq_chip,
handle_simple_irq);
- set_irq_nested_thread(irq, 1);
+ irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
@@ -397,19 +447,22 @@ static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio)
#ifdef CONFIG_ARM
set_irq_flags(irq, 0);
#endif
- set_irq_chip_and_handler(irq, NULL, NULL);
- set_irq_chip_data(irq, NULL);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
}
}
static int __devinit ab8500_gpio_probe(struct platform_device *pdev)
{
+ struct ab8500 *parent = dev_get_drvdata(pdev->dev.parent);
struct ab8500_platform_data *ab8500_pdata =
dev_get_platdata(pdev->dev.parent);
struct ab8500_gpio_platform_data *pdata;
struct ab8500_gpio *ab8500_gpio;
int ret;
int i;
+ int last_gpio_sel_reg;
+ int altfun_reg_index;
pdata = ab8500_pdata->gpio;
if (!pdata) {
@@ -425,10 +478,36 @@ static int __devinit ab8500_gpio_probe(struct platform_device *pdev)
ab8500_gpio->dev = &pdev->dev;
ab8500_gpio->parent = dev_get_drvdata(pdev->dev.parent);
ab8500_gpio->chip = ab8500gpio_chip;
- ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO;
ab8500_gpio->chip.dev = &pdev->dev;
ab8500_gpio->chip.base = pdata->gpio_base;
ab8500_gpio->irq_base = pdata->irq_base;
+
+ /* Configure GPIO settings for the specific AB device */
+ if (cpu_is_u9540()) {
+ ab8500_gpio->chip.ngpio = AB9540_NUM_GPIO;
+ ab8500_gpio->irq_cluster = ab9540_irq_clusters;
+ ab8500_gpio->irq_cluster_size =
+ ARRAY_SIZE(ab9540_irq_clusters);
+ last_gpio_sel_reg = AB9540_GPIO_SEL7_REG;
+ altfun_reg_index = AB9540_ALTFUN_REG_INDEX;
+ } else {
+ if (is_ab8505(parent)) {
+ ab8500_gpio->chip.ngpio = AB8505_NUM_GPIO;
+ ab8500_gpio->irq_cluster = ab8505_irq_clusters;
+ ab8500_gpio->irq_cluster_size =
+ ARRAY_SIZE(ab8505_irq_clusters);
+ last_gpio_sel_reg = AB9540_GPIO_SEL7_REG;
+ altfun_reg_index = AB9540_ALTFUN_REG_INDEX;
+ } else {
+ ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO;
+ ab8500_gpio->irq_cluster = ab8500_irq_clusters;
+ ab8500_gpio->irq_cluster_size =
+ ARRAY_SIZE(ab8500_irq_clusters);
+ last_gpio_sel_reg = AB8500_GPIO_SEL6_REG;
+ altfun_reg_index = AB8500_ALTFUN_REG_INDEX;
+ }
+ }
+
/* initialize the lock */
mutex_init(&ab8500_gpio->lock);
/*
@@ -437,16 +516,28 @@ static int __devinit ab8500_gpio_probe(struct platform_device *pdev)
* These values are for selecting the PINs as
* GPIO or alternate function
*/
- for (i = AB8500_GPIO_SEL1_REG; i <= AB8500_GPIO_SEL6_REG; i++) {
+ for (i = AB8500_GPIO_SEL1_REG; i <= last_gpio_sel_reg; i++) {
ret = abx500_set_register_interruptible(ab8500_gpio->dev,
AB8500_MISC, i,
pdata->config_reg[i]);
if (ret < 0)
goto out_free;
+
+ ret = abx500_set_register_interruptible(ab8500_gpio->dev,
+ AB8500_MISC, i + AB8500_GPIO_DIR1_REG,
+ pdata->config_direction[i]);
+ if (ret < 0)
+ goto out_free;
+
+ ret = abx500_set_register_interruptible(ab8500_gpio->dev,
+ AB8500_MISC, i + AB8500_GPIO_PUD1_REG,
+ pdata->config_pullups[i]);
+ if (ret < 0)
+ goto out_free;
}
ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
AB8500_GPIO_ALTFUN_REG,
- pdata->config_reg[ALTFUN_REG_INDEX]);
+ pdata->config_reg[altfun_reg_index]);
if (ret < 0)
goto out_free;
@@ -493,6 +584,86 @@ static int __devexit ab8500_gpio_remove(struct platform_device *pdev)
return 0;
}
+int ab8500_config_pulldown(struct device *dev,
+ enum ab8500_pin gpio, bool enable)
+{
+ u8 offset = gpio - AB8500_PIN_GPIO1;
+ u8 pos = offset % 8;
+ u8 val = enable ? 0 : 1;
+ u8 reg = AB8500_GPIO_PUD1_REG + (offset / 8);
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(dev,
+ AB8500_MISC, reg, 1 << pos, val << pos);
+ if (ret < 0)
+ dev_err(dev, "%s write failed\n", __func__);
+ return ret;
+}
+EXPORT_SYMBOL(ab8500_config_pulldown);
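+
+/*
+ * Example (illustrative only): a client driver holding the ab8500 device
+ * pointer could enable the pulldown on the first GPIO pin like this.
+ *
+ *	if (ab8500_config_pulldown(dev, AB8500_PIN_GPIO1, true) < 0)
+ *		dev_warn(dev, "failed to enable GPIO1 pulldown\n");
+ */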
+
+/*
+ * ab8500_gpio_config_select()
+ *
+ * Configure functionality of pin, either specific use or GPIO.
+ * @dev: device pointer
+ * @gpio: gpio number
+ * @gpio_select: true if the pin should be used as GPIO
+ */
+int ab8500_gpio_config_select(struct device *dev,
+ enum ab8500_pin gpio, bool gpio_select)
+{
+ u8 offset = gpio - AB8500_PIN_GPIO1;
+ u8 reg = AB8500_GPIO_SEL1_REG + (offset / 8);
+ u8 pos = offset % 8;
+ u8 val = gpio_select ? 1 : 0;
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(dev,
+ AB8500_MISC, reg, 1 << pos, val << pos);
+ if (ret < 0)
+ dev_err(dev, "%s write failed\n", __func__);
+
+ dev_vdbg(dev, "%s (bank, addr, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ __func__, AB8500_MISC, reg, 1 << pos, val << pos);
+
+ return ret;
+}
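+
+/*
+ * Example (illustrative only): select plain GPIO mode for the first pin.
+ *
+ *	ret = ab8500_gpio_config_select(dev, AB8500_PIN_GPIO1, true);
+ *	if (ret < 0)
+ *		return ret;
+ */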
+
+/*
+ * ab8500_gpio_config_get_select()
+ *
+ * Read currently configured functionality, either specific use or GPIO.
+ * @dev: device pointer
+ * @gpio: gpio number
+ * @gpio_select: pointer to pin selection status
+ */
+int ab8500_gpio_config_get_select(struct device *dev,
+ enum ab8500_pin gpio, bool *gpio_select)
+{
+ u8 offset = gpio - AB8500_PIN_GPIO1;
+ u8 reg = AB8500_GPIO_SEL1_REG + (offset / 8);
+ u8 pos = offset % 8;
+ u8 val;
+ int ret;
+
+ ret = abx500_get_register_interruptible(dev,
+ AB8500_MISC, reg, &val);
+ if (ret < 0) {
+ dev_err(dev, "%s read failed\n", __func__);
+ return ret;
+ }
+
+ if (val & (1 << pos))
+ *gpio_select = true;
+ else
+ *gpio_select = false;
+
+ dev_vdbg(dev, "%s (bank, addr, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ __func__, AB8500_MISC, reg, 1 << pos, val);
+
+ return 0;
+}
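+
+/*
+ * Example (illustrative only): read back the current pin selection.
+ *
+ *	bool is_gpio;
+ *
+ *	ret = ab8500_gpio_config_get_select(dev, AB8500_PIN_GPIO1, &is_gpio);
+ *	if (!ret)
+ *		dev_info(dev, "pin 1 is %s\n", is_gpio ? "gpio" : "altfunc");
+ */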
+
static struct platform_driver ab8500_gpio_driver = {
.driver = {
.name = "ab8500-gpio",
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 839624f9fe6..05547ed48ff 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -23,12 +23,11 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
+#include <linux/gpio/nomadik.h>
#include <asm/mach/irq.h>
#include <plat/pincfg.h>
-#include <plat/gpio-nomadik.h>
-#include <mach/hardware.h>
#include <asm/gpio.h>
/*
@@ -58,8 +57,11 @@ struct nmk_gpio_chip {
u32 real_wake;
u32 rwimsc;
u32 fwimsc;
+ u32 rimsc;
+ u32 fimsc;
u32 slpm;
u32 pull_up;
+ u32 lowemi;
};
static struct nmk_gpio_chip *
@@ -124,6 +126,24 @@ static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
}
}
+static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, bool lowemi)
+{
+ u32 bit = BIT(offset);
+ bool enabled = nmk_chip->lowemi & bit;
+
+ if (lowemi == enabled)
+ return;
+
+ if (lowemi)
+ nmk_chip->lowemi |= bit;
+ else
+ nmk_chip->lowemi &= ~bit;
+
+ writel_relaxed(nmk_chip->lowemi,
+ nmk_chip->addr + NMK_GPIO_LOWEMI);
+}
+
static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
unsigned offset)
{
@@ -150,8 +170,8 @@ static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
unsigned offset, int gpio_mode,
bool glitch)
{
- u32 rwimsc = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
- u32 fwimsc = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
+ u32 rwimsc = nmk_chip->rwimsc;
+ u32 fwimsc = nmk_chip->fwimsc;
if (glitch && nmk_chip->set_ioforce) {
u32 bit = BIT(offset);
@@ -173,6 +193,36 @@ static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
}
}
+static void
+nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
+{
+ u32 falling = nmk_chip->fimsc & BIT(offset);
+ u32 rising = nmk_chip->rimsc & BIT(offset);
+ int gpio = nmk_chip->chip.base + offset;
+ int irq = NOMADIK_GPIO_TO_IRQ(gpio);
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ if (!rising && !falling)
+ return;
+
+ if (!d || !irqd_irq_disabled(d))
+ return;
+
+ if (rising) {
+ nmk_chip->rimsc &= ~BIT(offset);
+ writel_relaxed(nmk_chip->rimsc,
+ nmk_chip->addr + NMK_GPIO_RIMSC);
+ }
+
+ if (falling) {
+ nmk_chip->fimsc &= ~BIT(offset);
+ writel_relaxed(nmk_chip->fimsc,
+ nmk_chip->addr + NMK_GPIO_FIMSC);
+ }
+
+ dev_dbg(nmk_chip->chip.dev, "%d: clearing interrupt mask\n", gpio);
+}
+
static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
pin_cfg_t cfg, bool sleep, unsigned int *slpmregs)
{
@@ -238,6 +288,17 @@ static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
__nmk_gpio_set_pull(nmk_chip, offset, pull);
}
+ __nmk_gpio_set_lowemi(nmk_chip, offset, PIN_LOWEMI(cfg));
+
+ /*
+ * If the pin is switching to altfunc, and there was an interrupt
+ * installed on it which has been lazy disabled, actually mask the
+ * interrupt to prevent spurious interrupts that would occur while the
+ * pin is under control of the peripheral. Only SKE does this.
+ */
+ if (af != NMK_GPIO_ALT_GPIO)
+ nmk_gpio_disable_lazy_irq(nmk_chip, offset);
+
/*
* If we've backed up the SLPM registers (glitch workaround), modify
* the backups since they will be restored.
@@ -359,7 +420,7 @@ static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep)
/**
* nmk_config_pin - configure a pin's mux attributes
* @cfg: pin configuration
- *
+ * @sleep: Non-zero to apply the sleep mode configuration
* Configures a pin's mode (alternate function or GPIO), its pull up status,
* and its sleep mode based on the specified configuration. The @cfg is
* usually one of the SoC specific macros defined in mach/<soc>-pins.h. These
@@ -556,27 +617,38 @@ static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
int gpio, enum nmk_gpio_irq_type which,
bool enable)
{
- u32 rimsc = which == WAKE ? NMK_GPIO_RWIMSC : NMK_GPIO_RIMSC;
- u32 fimsc = which == WAKE ? NMK_GPIO_FWIMSC : NMK_GPIO_FIMSC;
u32 bitmask = nmk_gpio_get_bitmask(gpio);
- u32 reg;
+ u32 *rimscval;
+ u32 *fimscval;
+ u32 rimscreg;
+ u32 fimscreg;
+
+ if (which == NORMAL) {
+ rimscreg = NMK_GPIO_RIMSC;
+ fimscreg = NMK_GPIO_FIMSC;
+ rimscval = &nmk_chip->rimsc;
+ fimscval = &nmk_chip->fimsc;
+ } else {
+ rimscreg = NMK_GPIO_RWIMSC;
+ fimscreg = NMK_GPIO_FWIMSC;
+ rimscval = &nmk_chip->rwimsc;
+ fimscval = &nmk_chip->fwimsc;
+ }
/* we must individually set/clear the two edges */
if (nmk_chip->edge_rising & bitmask) {
- reg = readl(nmk_chip->addr + rimsc);
if (enable)
- reg |= bitmask;
+ *rimscval |= bitmask;
else
- reg &= ~bitmask;
- writel(reg, nmk_chip->addr + rimsc);
+ *rimscval &= ~bitmask;
+ writel(*rimscval, nmk_chip->addr + rimscreg);
}
if (nmk_chip->edge_falling & bitmask) {
- reg = readl(nmk_chip->addr + fimsc);
if (enable)
- reg |= bitmask;
+ *fimscval |= bitmask;
else
- reg &= ~bitmask;
- writel(reg, nmk_chip->addr + fimsc);
+ *fimscval &= ~bitmask;
+ writel(*fimscval, nmk_chip->addr + fimscreg);
}
}
@@ -1008,9 +1080,6 @@ void nmk_gpio_wakeups_suspend(void)
clk_enable(chip->clk);
- chip->rwimsc = readl(chip->addr + NMK_GPIO_RWIMSC);
- chip->fwimsc = readl(chip->addr + NMK_GPIO_FWIMSC);
-
writel(chip->rwimsc & chip->real_wake,
chip->addr + NMK_GPIO_RWIMSC);
writel(chip->fwimsc & chip->real_wake,
@@ -1076,6 +1145,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
struct resource *res;
struct clk *clk;
int secondary_irq;
+ void __iomem *base;
int irq;
int ret;
@@ -1106,10 +1176,16 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
goto out;
}
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+
clk = clk_get(&dev->dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- goto out_release;
+ goto out_unmap;
}
nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
@@ -1123,7 +1199,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
*/
nmk_chip->bank = dev->id;
nmk_chip->clk = clk;
- nmk_chip->addr = io_p2v(res->start);
+ nmk_chip->addr = base;
nmk_chip->chip = nmk_gpio_template;
nmk_chip->parent_irq = irq;
nmk_chip->secondary_parent_irq = secondary_irq;
@@ -1139,6 +1215,10 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
chip->dev = &dev->dev;
chip->owner = THIS_MODULE;
+ clk_enable(nmk_chip->clk);
+ nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
+ clk_disable(nmk_chip->clk);
+
ret = gpiochip_add(&nmk_chip->chip);
if (ret)
goto out_free;
@@ -1159,6 +1239,8 @@ out_free:
out_clk:
clk_disable(clk);
clk_put(clk);
+out_unmap:
+ iounmap(base);
out_release:
release_mem_region(res->start, resource_size(res));
out:
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc9277885dd..b2ae09b4cc7 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1,2 @@
obj-y += drm/ vga/ stub/
+obj-y += mali/
diff --git a/drivers/gpu/mali/Kconfig b/drivers/gpu/mali/Kconfig
new file mode 100644
index 00000000000..0781bcbd854
--- /dev/null
+++ b/drivers/gpu/mali/Kconfig
@@ -0,0 +1,15 @@
+config GPU_MALI
+ tristate "ARM Mali 200/300/400 support"
+ depends on ARM
+ ---help---
+ This enables support for the ARM Mali 200/300/400 family of GPUs.
+ Platform-specific configuration is made in configuration files in the
+ drivers/gpu/mali/src folder.
+
+config GPU_MALI_DEBUG
+ boolean "Debug mode (required for instrumentation)"
+ default y
+ depends on GPU_MALI
+ ---help---
+ This enables debug prints in the mali device driver. Debug mode must be
+ enabled in order to use the instrumentation feature of the mali libraries.
diff --git a/drivers/gpu/mali/Makefile b/drivers/gpu/mali/Makefile
new file mode 100644
index 00000000000..93c95aca19a
--- /dev/null
+++ b/drivers/gpu/mali/Makefile
@@ -0,0 +1,10 @@
+MALI_SUBFOLDER := mali400ko/driver/src/devicedrv/mali
+MALIDRM_SUBFOLDER := mali400ko/x11/mali_drm/mali
+MALI_FOLDER := $(srctree)/$(src)/$(MALI_SUBFOLDER)
+ifeq ($(shell [ -d $(MALI_FOLDER) ] && echo "OK"), OK)
+obj-$(CONFIG_GPU_MALI) += $(MALI_SUBFOLDER)/
+obj-y += $(MALIDRM_SUBFOLDER)/
+else
+$(warning WARNING: mali: Could not find $(MALI_FOLDER) - mali device driver will not be built)
+obj-n += ./
+endif
diff --git a/drivers/gpu/mali/mali400ko/.gitignore b/drivers/gpu/mali/mali400ko/.gitignore
new file mode 100644
index 00000000000..e2d66d55882
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/.gitignore
@@ -0,0 +1,32 @@
+#symlinks created during build
+driver/src/devicedrv/mali/arch
+driver/bin
+driver/devdrv
+driver/lib
+
+#build output
+driver/build
+.tmp_versions
+*.cmd
+*.o
+*.d
+driver/src/devicedrv/linux/__mali_build_info.*
+driver/src/devicedrv/linux/modules.order
+driver/src/devicedrv/linux/Module.symvers
+driver/src/devicedrv/mali/Module.symvers
+driver/src/devicedrv/mali/__malidrv_build_info.c
+driver/src/devicedrv/mali/mali.ko
+driver/src/devicedrv/mali/mali.mod.c
+driver/src/devicedrv/mali/modules.order
+driver/out*
+driver/src/shared/essl_compiler/src/shadergen_maligp2/shader_pieces.*
+
+out
+kernel
+
+#temp files from editors and Eclipse project files
+*~
+.cproject
+.project
+.settings
+
diff --git a/drivers/gpu/mali/mali400ko/Makefile b/drivers/gpu/mali/mali400ko/Makefile
new file mode 100644
index 00000000000..9176f9f3795
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/Makefile
@@ -0,0 +1,102 @@
+#This Makefile can be called to do an out-of-kernel build of the mali kernel module.
+#It is not used when mali is built from the kernel build system.
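+#
+#Example invocation (the paths below are placeholders):
+#  make KERNEL_BUILD_DIR=/path/to/kernel CROSS_COMPILE=arm-eabi- all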
+
+CROSS_COMPILE ?= arm-eabi-
+
+ifeq ($(TARGET_PRODUCT),ste_u5500)
+CONFIG_UX500_SOC_DB5500=y
+export CONFIG_UX500_SOC_DB5500
+endif
+
+PWD:=$(shell pwd)
+
+#if KERNEL_BUILD_DIR parameter is not set, assume there is a symlink to the kernel
+KERNEL_BUILD_DIR?=$(PWD)/kernel
+
+# android
+ifneq (,$(findstring -android,$(PLATFORM)))
+INSTALL_MOD_PATH?=$(ANDROID_OUT_TARGET_PRODUCT_DIRECTORY)/system
+endif
+
+# lbp
+ifneq (,$(findstring -linux,$(PLATFORM)))
+PREFIX?=$(MMROOT)/linux/install/cortexA9-linux-gnu-href_v1
+INSTALL_MOD_PATH?=$(PREFIX)
+endif
+
+#if still no kernel module path is set, install the modules in a local folder
+ifeq (,$(INSTALL_MOD_PATH))
+INSTALL_MOD_PATH?=$(PWD)/out
+endif
+
+KO_FLAGS=KDIR=$(KERNEL_BUILD_DIR) CROSS_COMPILE=$(CROSS_COMPILE)
+
+.PHONY: all entry check
+
+all: mali-devicedrv install-mali
+
+check:
+ @if [ ! -d $(KERNEL_BUILD_DIR) ] ; then echo "Error: provide a path to your linux kernel through KERNEL_BUILD_DIR"; exit 1 ; fi
+
+## Build ###############
+
+mali-devicedrv: check
+ $(MAKE) -C driver/src/devicedrv/mali $(KO_FLAGS) all
+
+ump-devicedrv: check
+ $(MAKE) -C driver/src/devicedrv/ump $(KO_FLAGS) all
+
+mali_drm-devicedrv: check
+ $(MAKE) -C x11/mali_drm/mali $(KO_FLAGS) all
+
+## Clean ###############
+
+clean-mali:
+ $(MAKE) clean -C driver/src/devicedrv/mali $(KO_FLAGS)
+
+clean-ump:
+ $(MAKE) clean -C driver/src/devicedrv/ump $(KO_FLAGS)
+
+clean-mali_drm:
+ $(MAKE) clean -C x11/mali_drm/mali $(KO_FLAGS)
+
+clean: clean-mali
+
+realclean:
+ rm -f driver/src/devicedrv/mali/common/*.o
+ rm -f driver/src/devicedrv/mali/linux/*.o
+ rm -f driver/src/devicedrv/mali/common/.*.cmd
+ rm -f driver/src/devicedrv/mali/linux/.*.cmd
+ rm -f driver/src/devicedrv/mali/.*.cmd
+ rm -f driver/src/devicedrv/mali/*.c
+
+distclean: realclean
+
+## Install #############
+
+install: install-mali
+
+install-mali: mali-devicedrv
+ $(MAKE) -C $(KERNEL_BUILD_DIR) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) KERNELDIR=$(KERNEL_BUILD_DIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) M=$(PWD)/driver/src/devicedrv/mali modules_install
+
+install-ump: ump-devicedrv
+ $(MAKE) -C $(KERNEL_BUILD_DIR) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) KERNELDIR=$(KERNEL_BUILD_DIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) M=$(PWD)/driver/src/devicedrv/ump modules_install
+
+install-mali_drm: mali_drm-devicedrv
+ $(MAKE) -C $(KERNEL_BUILD_DIR) ARCH=arm CROSS_COMPILE=$(CROSS_COMPILE) KERNELDIR=$(KERNEL_BUILD_DIR) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) M=$(PWD)/x11/mali_drm/mali modules_install
+
+
+printenv:
+ @echo "\033[0;44m"
+ @echo "ANDROID_OUT_TARGET_PRODUCT_DIRECTORY: " $(ANDROID_OUT_TARGET_PRODUCT_DIRECTORY)
+ @echo "ANDROID_BSP_ROOT: " $(ANDROID_BSP_ROOT)
+ @echo "KERNEL_BUILD_DIR: " $(KERNEL_BUILD_DIR)
+ @echo "MM_INSTALL_DIR: " $(MM_INSTALL_DIR)
+ @echo "PLATFORM: " $(PLATFORM)
+ @echo "PREFIX: " $(PREFIX)
+ @echo "MMROOT: " $(MMROOT)
+ @echo "INSTALL_MOD_PATH: " $(INSTALL_MOD_PATH)
+ @echo "FOO: " $(FOO)
+ @echo "VARIANT: " $(VARIANT)
+ @echo "\033[0m"
+
diff --git a/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h
new file mode 100644
index 00000000000..3bd928aefdb
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface.h
+ *
+ * This file contains the kernel space part of the UMP API.
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_H__
+#define __UMP_KERNEL_INTERFACE_H__
+
+
+/** @defgroup ump_kernel_space_api UMP Kernel Space API
+ * @{ */
+
+
+#include "ump_kernel_platform.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/**
+ * External representation of a UMP handle in kernel space.
+ */
+typedef void * ump_dd_handle;
+
+/**
+ * Typedef for a secure ID, a system-wide identifier for UMP memory buffers.
+ */
+typedef unsigned int ump_secure_id;
+
+
+/**
+ * Value to indicate an invalid UMP memory handle.
+ */
+#define UMP_DD_HANDLE_INVALID ((ump_dd_handle)0)
+
+
+/**
+ * Value to indicate an invalid secure Id.
+ */
+#define UMP_INVALID_SECURE_ID ((ump_secure_id)-1)
+
+
+/**
+ * UMP error codes for kernel space.
+ */
+typedef enum
+{
+ UMP_DD_SUCCESS, /**< indicates success */
+ UMP_DD_INVALID, /**< indicates failure */
+} ump_dd_status_code;
+
+
+/**
+ * Struct used to describe a physical block used by UMP memory
+ */
+typedef struct ump_dd_physical_block
+{
+ unsigned long addr; /**< The physical address of the block */
+ unsigned long size; /**< The length of the block, typically page aligned */
+} ump_dd_physical_block;
+
+
+/**
+ * Retrieves the secure ID for the specified UMP memory.
+ *
+ * This identifier is unique across the entire system, and uniquely identifies
+ * the specified UMP memory. The identifier can later be used through the
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id" or
+ * @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ * functions in order to access this UMP memory, for instance from another process.
+ *
+ * @note There is a user space equivalent function called @ref ump_secure_id_get "ump_secure_id_get"
+ *
+ * @see ump_dd_handle_create_from_secure_id
+ * @see ump_handle_create_from_secure_id
+ * @see ump_secure_id_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the secure ID for the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle mem);
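+
+/*
+ * Example (illustrative only): export a buffer to another process by
+ * publishing its secure ID. share_with_client() is a hypothetical
+ * transport helper, not part of this API.
+ *
+ *	ump_secure_id id = ump_dd_secure_id_get(mem);
+ *
+ *	if (id != UMP_INVALID_SECURE_ID)
+ *		share_with_client(id);
+ */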
+
+
+/**
+ * Retrieves a handle to allocated UMP memory.
+ *
+ * The usage of UMP memory is reference counted, so this will increment the reference
+ * count by one for the specified UMP memory.
+ * Use @ref ump_dd_reference_release "ump_dd_reference_release" when there is no longer any
+ * use for the retrieved handle.
+ *
+ * @note There is a user space equivalent function called @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ *
+ * @see ump_dd_reference_release
+ * @see ump_handle_create_from_secure_id
+ *
+ * @param secure_id The secure ID of the UMP memory to open, which can be retrieved using the @ref ump_secure_id_get "ump_secure_id_get" function.
+ *
+ * @return UMP_DD_HANDLE_INVALID indicates failure, otherwise a valid handle is returned.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id);
+
+
+/**
+ * Retrieves the number of physical blocks used by the specified UMP memory.
+ *
+ * This function retrieves the number of @ref ump_dd_physical_block "ump_dd_physical_block" structs needed
+ * to describe the physical memory layout of the given UMP memory. This can later be used when calling
+ * the functions @ref ump_dd_phys_blocks_get "ump_dd_phys_blocks_get" and
+ * @ref ump_dd_phys_block_get "ump_dd_phys_block_get".
+ *
+ * @see ump_dd_phys_blocks_get
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return The number of ump_dd_physical_block structs required to describe the physical memory layout of the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves all physical memory block information for the specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will fail if the num_blocks parameter is either too large or too small.
+ *
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param blocks An array of @ref ump_dd_physical_block "ump_dd_physical_block" structs that will receive the physical description.
+ * @param num_blocks The number of blocks to return in the blocks array. Use the function
+ * @ref ump_dd_phys_block_count_get "ump_dd_phys_block_count_get" first to determine the number of blocks required.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle mem, ump_dd_physical_block * blocks, unsigned long num_blocks);
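+
+/*
+ * Example (illustrative only): fetch the full physical layout of a
+ * buffer. map_one_block() is a hypothetical MMU-table helper supplied
+ * by the calling driver.
+ *
+ *	unsigned long i, n = ump_dd_phys_block_count_get(mem);
+ *	ump_dd_physical_block *blocks =
+ *		kmalloc(n * sizeof(*blocks), GFP_KERNEL);
+ *
+ *	if (blocks && ump_dd_phys_blocks_get(mem, blocks, n) == UMP_DD_SUCCESS)
+ *		for (i = 0; i < n; i++)
+ *			map_one_block(blocks[i].addr, blocks[i].size);
+ *	kfree(blocks);
+ */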
+
+
+/**
+ * Retrieves the physical memory block information for the specified block of the specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will return UMP_DD_INVALID if the specified index is out of range.
+ *
+ * @see ump_dd_phys_blocks_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param index Which physical info block to retrieve.
+ * @param block Pointer to a @ref ump_dd_physical_block "ump_dd_physical_block" struct which will receive the requested information.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle mem, unsigned long index, ump_dd_physical_block * block);
+
+
+/**
+ * Retrieves the actual size of the specified UMP memory.
+ *
+ * The size is reported in bytes, and is typically page aligned.
+ *
+ * @note There is a user space equivalent function called @ref ump_size_get "ump_size_get"
+ *
+ * @see ump_size_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the allocated size of the specified UMP memory, in bytes.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle mem);
+
+
+/**
+ * Adds an extra reference to the specified UMP memory.
+ *
+ * This function adds an extra reference to the specified UMP memory. This function should
+ * be used every time a UMP memory handle is duplicated, that is, assigned to another ump_dd_handle
+ * variable. The function @ref ump_dd_reference_release "ump_dd_reference_release" must then be used
+ * to release each copy of the UMP memory handle.
+ *
+ * @note You are not required to call @ref ump_dd_reference_add "ump_dd_reference_add"
+ * for UMP handles returned from
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id",
+ * because these handles are already reference counted by this function.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_add "ump_reference_add"
+ *
+ * @see ump_reference_add
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle mem);
+
+
+/**
+ * Releases a reference from the specified UMP memory.
+ *
+ * This function should be called once for every reference to the UMP memory handle.
+ * When the last reference is released, all resources associated with this UMP memory
+ * handle are freed.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_release "ump_reference_release"
+ *
+ * @see ump_reference_release
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle mem);
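+
+/*
+ * Example (illustrative only): typical import lifecycle. The handle
+ * returned by ump_dd_handle_create_from_secure_id() already holds a
+ * reference, so a single release pairs with it. use_buffer() is a
+ * hypothetical consumer of the memory.
+ *
+ *	ump_dd_handle mem = ump_dd_handle_create_from_secure_id(id);
+ *
+ *	if (mem != UMP_DD_HANDLE_INVALID) {
+ *		unsigned long size = ump_dd_size_get(mem);
+ *
+ *		use_buffer(mem, size);
+ *		ump_dd_reference_release(mem);
+ *	}
+ */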
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_INTERFACE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h
new file mode 100644
index 00000000000..70eea22d3c3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_interface_ref_drv.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface_ref_drv.h
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_REF_DRV_H__
+#define __UMP_KERNEL_INTERFACE_REF_DRV_H__
+
+#include "ump_kernel_interface.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Turn specified physical memory into UMP memory. */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_INTERFACE_REF_DRV_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h
new file mode 100644
index 00000000000..55c49885f06
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/include/ump/ump_kernel_platform.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_platform.h
+ *
+ * This file should define UMP_KERNEL_API_EXPORT,
+ * which dictates how the UMP kernel API should be exported/imported.
+ * Modify this file, if needed, to match your platform setup.
+ */
+
+#ifndef __UMP_KERNEL_PLATFORM_H__
+#define __UMP_KERNEL_PLATFORM_H__
+
+/** @addtogroup ump_kernel_space_api
+ * @{ */
+
+/**
+ * A define which controls how UMP kernel space API functions are imported and exported.
+ * This define should be set by the implementor of the UMP API.
+ */
+
+#if defined(_WIN32)
+
+#if defined(UMP_BUILDING_UMP_LIBRARY)
+#define UMP_KERNEL_API_EXPORT __declspec(dllexport)
+#else
+#define UMP_KERNEL_API_EXPORT __declspec(dllimport)
+#endif
+
+#else
+
+#define UMP_KERNEL_API_EXPORT
+
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_PLATFORM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile
new file mode 100644
index 00000000000..20b57359167
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile
@@ -0,0 +1,346 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+OSKOS=linux
+FILES_PREFIX=
+
+ifneq ($(KBUILD_EXTMOD),)
+DRIVER_DIR=$(KBUILD_EXTMOD)
+else
+src?=.
+srctree?=.
+DRIVER_DIR?=$(srctree)/$(src)
+M?=$(DRIVER_DIR)
+endif
+MALI_RELEASE_NAME=$(shell cat $(DRIVER_DIR)/.version 2> /dev/null)
+include $(DRIVER_DIR)/Makefile.platform
+include $(DRIVER_DIR)/Makefile.common
+
+# set up defaults if not defined by the user
+ARCH ?= arm
+USING_MMU ?= 1
+USING_UMP ?= 0
+USING_HWMEM ?= 0
+USING_OS_MEMORY ?= 0
+USING_PMM ?= 0
+USING_GPU_UTILIZATION ?= 0
+USING_MALI_RUN_TIME_PM ?= 0
+USING_MALI_PMM_TESTSUITE ?= 0
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 6
+USING_PROFILING ?= 0
+TIMESTAMP ?= default
+BUILD ?= debug
+TARGET_PLATFORM ?= default
+
+ifeq ($(USING_UMP),1)
+ifneq ($(USING_HWMEM),1)
+ UMP_SYMVERS_FILE = ../ump/Module.symvers
+ KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTMOD)/$(UMP_SYMVERS_FILE)
+endif
+endif
+
+# Check if a Mali core submodule should be enabled; returns a non-zero count if so, 0 otherwise
+submodule_enabled = $(shell gcc $(DEFINES) -E $1/arch/config.h | grep type | grep -c $(2))
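+# e.g. $(call submodule_enabled,$M,MALI400PP) preprocesses $M/arch/config.h
+# with gcc -E and counts the resource entries whose type matches MALI400PP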
+
+# linux build system integration
+ifneq ($(KERNELRELEASE),)
+# Inside the kernel build system
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Set up our defines, which will be passed to gcc
+DEFINES += -DUSING_OS_MEMORY=$(USING_OS_MEMORY)
+DEFINES += -DUSING_MMU=$(USING_MMU)
+DEFINES += -DUSING_UMP=$(USING_UMP)
+DEFINES += -DUSING_HWMEM=$(USING_HWMEM)
+DEFINES += -D_MALI_OSK_SPECIFIC_INDIRECT_MMAP
+DEFINES += -DMALI_TIMELINE_PROFILING_ENABLED=$(USING_PROFILING)
+DEFINES += -DMALI_POWER_MGMT_TEST_SUITE=$(USING_MALI_PMM_TESTSUITE)
+DEFINES += -DMALI_PMM_RUNTIME_JOB_CONTROL_ON=$(USING_MALI_RUN_TIME_PM)
+DEFINES += -DMALI_STATE_TRACKING=1
+DEFINES += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+
+ifneq ($(call submodule_enabled, $M, PMU),0)
+ MALI_PLATFORM_FILE = platform/mali400-pmu/mali_platform.c
+else
+ MALI_PLATFORM_FILE = platform/$(TARGET_PLATFORM)/mali_platform.c
+endif
+
+DEFINES += -DUSING_MALI_PMM=$(USING_PMM)
+DEFINES += -DMALI_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+
+ifeq ($(BUILD), debug)
+DEFINES += -DDEBUG
+endif
+DEFINES += -DSVN_REV=$(SVN_REV)
+DEFINES += -DSVN_REV_STRING=\"$(SVN_REV)\"
+
+# Linux has its own mmap cleanup handlers (see mali_kernel_mem_mmu.c)
+DEFINES += -DMALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
+
+ifeq ($(USING_UMP),1)
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=1
+ EXTRA_CFLAGS += -I$(DRIVER_DIR)/../../../include/ump
+else
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=0
+endif
+
+# Use our defines when compiling
+EXTRA_CFLAGS += $(DEFINES) -I$(DRIVER_DIR) -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/platform
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+ifeq ($(wildcard $(DRIVER_DIR)/linux/license/gpl/*),)
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/linux/license/proprietary
+else
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/linux/license/gpl
+endif
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/common/pmm
+
+# Source files which always are included in a build
+SRC = \
+ common/mali_kernel_core.c \
+ linux/mali_kernel_linux.c \
+ $(OSKOS)/mali_osk_indir_mmap.c \
+ common/mali_kernel_rendercore.c \
+ common/mali_kernel_descriptor_mapping.c \
+ common/mali_kernel_vsync.c \
+ linux/mali_ukk_vsync.c \
+ $(MALI_PLATFORM_FILE) \
+ $(OSKFILES) \
+ $(UKKFILES)
+ #__malidrv_build_info.c
+
+ifeq ($(USING_PROFILING),1)
+SRC += \
+ common/mali_kernel_profiling.c \
+ timestamp-$(TIMESTAMP)/mali_timestamp.c
+EXTRA_CFLAGS += -I$(DRIVER_DIR)/timestamp-$(TIMESTAMP)
+endif
+
+# Selecting files to compile by parsing the config file
+
+ifeq ($(USING_PMM),1)
+SRC += \
+ common/pmm/mali_pmm.c \
+ common/pmm/mali_pmm_policy.c \
+ common/pmm/mali_pmm_policy_alwayson.c \
+ common/pmm/mali_pmm_policy_jobcontrol.c \
+ common/pmm/mali_pmm_state.c \
+ linux/mali_kernel_pm.c \
+ linux/mali_osk_pm.c \
+ linux/mali_device_pause_resume.c
+endif
+
+ifeq ($(USING_GPU_UTILIZATION),1)
+SRC += \
+ common/mali_kernel_utilization.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI400PP),0)
+ # Mali-400 PP in use
+ EXTRA_CFLAGS += -DUSING_MALI400
+ SRC += common/mali_kernel_MALI200.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI400GP),0)
+ # Mali-400 GP in use
+ SRC += common/mali_kernel_GP2.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI300PP),0)
+ # Mali-300 PP in use
+ EXTRA_CFLAGS += -DUSING_MALI400
+ SRC += common/mali_kernel_MALI200.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI300GP),0)
+ # Mali-300 GP in use
+ SRC += common/mali_kernel_GP2.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI200),0)
+ # Mali200 in use
+ EXTRA_CFLAGS += -DUSING_MALI200
+ SRC += common/mali_kernel_MALI200.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALIGP2),0)
+ # MaliGP2 in use
+ SRC += common/mali_kernel_GP2.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MMU),0)
+ # Mali MMU in use
+ SRC += common/mali_kernel_mem_mmu.c common/mali_kernel_memory_engine.c common/mali_block_allocator.c common/mali_kernel_mem_os.c
+else
+ # No Mali MMU in use
+ SRC += common/mali_kernel_mem_buddy.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI400L2),0)
+ # Mali Level2 cache in use
+ EXTRA_CFLAGS += -DUSING_MALI400_L2_CACHE
+ SRC += common/mali_kernel_l2_cache.c
+endif
+
+ifneq ($(call submodule_enabled, $M, MALI300L2),0)
+ # Mali Level2 cache in use
+ EXTRA_CFLAGS += -DUSING_MALI400_L2_CACHE
+ SRC += common/mali_kernel_l2_cache.c
+endif
+
+ifeq ($(USING_HWMEM),1)
+ # HWMEM used as backend for UMP api
+ EXTRA_CFLAGS += -I$(DRIVER_DIR)/../ump/common
+ SRC += platform/ux500/ump_kernel_api_hwmem.c
+endif
+
+# Target build file
+MODULE:=mali.ko
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_GPU_MALI) := $(MODULE:.ko=.o)
+# Tell the Linux build system to enable building of our .c files
+$(MODULE:.ko=-y) := $(SRC:.c=.o)
+
+else
+# Outside the kernel build system
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+ifeq ($(ARCH), arm)
+ # when compiling for ARM we're cross compiling
+ export CROSS_COMPILE ?= arm-none-linux-gnueabi-
+ # default to Virtex5
+ CONFIG ?= pb-virtex5-m200
+else
+ # Compiling for the host
+ CONFIG ?= $(shell uname -m)
+endif
+
+# default cpu to select
+CPU ?= pb11mp
+
+# look up KDIR based on CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning Config $(CONFIG))
+$(warning Host CPU $(CPU))
+$(warning MMU $(USING_MMU))
+$(warning OS_MEMORY $(USING_OS_MEMORY))
+endif
+
+# Validate selected config
+ifneq ($(shell [ -d $(DRIVER_DIR)/arch-$(CONFIG) ] && [ -f $(DRIVER_DIR)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(shell pwd))
+$(error No configuration found for config $(CONFIG). Check that $(DRIVER_DIR)/arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L $(DRIVER_DIR)/arch ] && rm $(DRIVER_DIR)/arch)
+$(shell ln -sf $(DRIVER_DIR)/arch-$(CONFIG) $(DRIVER_DIR)/arch)
+$(shell touch $(DRIVER_DIR)/arch/config.h)
+
+# Register number of Mali400 cores for routing test jobs to the right Mali-400
+ifeq ($(CONFIG), pb-virtex5-m400-1)
+VERSION_STRINGS += USING_MALI400_PP_CORES=1
+endif
+ifeq ($(CONFIG), pb-virtex5-m400-2)
+VERSION_STRINGS += USING_MALI400_PP_CORES=2
+endif
+ifeq ($(CONFIG), pb-virtex5-m400-3)
+VERSION_STRINGS += USING_MALI400_PP_CORES=3
+endif
+ifeq ($(CONFIG), pb-virtex5-m400-4)
+VERSION_STRINGS += USING_MALI400_PP_CORES=4
+endif
+
+endif
+
+# Filename to use when patching. Original will be saved as this
+PATCH_BACKUP_FILE:=config_makefile_patch_backup.h
+
+ifdef ARM_INTERNAL_BUILD
+$(warning Looking for previous backup)
+endif
+# Look for previous backup
+shell_output:=$(shell [ -f arch/$(PATCH_BACKUP_FILE) ] && mv -vf arch/$(PATCH_BACKUP_FILE) arch/config.h && echo "Patch backup restored")
+ifneq ($(shell_output),)
+ifdef ARM_INTERNAL_BUILD
+$(warning $(shell_output))
+endif
+endif
+
+ifdef PATCH
+ifdef ARM_INTERNAL_BUILD
+$(warning Patching using: $(PATCH) )
+endif
+ifneq ($(shell [ -f arch/$(PATCH).diff ] && echo "OK"), OK)
+# The patch file does not exist
+shell_output:=$(shell echo "Possible PATCH arguments are:" ; find arch/ -name "*.diff" |sed -r 's/.*\/(.*)\.diff/\1/')
+ifdef ARM_INTERNAL_BUILD
+$(warning $(shell_output))
+endif
+$(error Could not find file arch-$(CONFIG)/$(PATCH).diff )
+else
+# Patch file found, do patching
+shell_output:=$(shell cp -f arch/config.h arch/$(PATCH_BACKUP_FILE) && patch --no-backup-if-mismatch -st -p0 arch/config.h -i arch/$(PATCH).diff && echo "OK")
+ifneq ($(shell_output), OK)
+$(warning Output from failed patch: $(shell_output))
+$(shell mv -f arch/$(PATCH_BACKUP_FILE) arch/config.h)
+$(error Could not patch file arch-$(CONFIG)/config.h with arch-$(CONFIG)/$(PATCH).diff.)
+endif
+endif
+endif
+
+# Extend common version-string
+VERSION_STRINGS += BUILD=$(shell echo $(BUILD) | tr a-z A-Z)
+VERSION_STRINGS += CPU=$(CPU)
+VERSION_STRINGS += USING_MMU=$(USING_MMU)
+VERSION_STRINGS += USING_UMP=$(USING_UMP)
+VERSION_STRINGS += USING_HWMEM=$(USING_HWMEM)
+VERSION_STRINGS += USING_PMM=$(USING_PMM)
+VERSION_STRINGS += USING_MALI200=$(call submodule_enabled, ., MALI200)
+VERSION_STRINGS += USING_MALI400=$(call submodule_enabled, ., MALI400)
+VERSION_STRINGS += USING_MALI400_L2_CACHE=$(call submodule_enabled, ., MALI400L2)
+VERSION_STRINGS += USING_GP2=$(call submodule_enabled, ., MALIGP2)
+VERSION_STRINGS += KDIR=$(KDIR)
+
+all: make-build-info-file $(UMP_SYMVERS_FILE)
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) CONFIG_GPU_MALI=m modules
+ @([ "$(PATCH)" != "" ] && [ -f arch/$(PATCH_BACKUP_FILE) ] && mv -f arch/$(PATCH_BACKUP_FILE) arch/config.h && echo "Patch backup restored") || echo No Unpatching needed
+ @rm -f $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+ @([ "$(PATCH)" != "" ] && [ -f arch/$(PATCH_BACKUP_FILE) ] && mv -f arch/$(PATCH_BACKUP_FILE) arch/config.h && echo "Patch backup restored") || echo No Unpatching needed
+
+make-build-info-file:
+ @echo 'char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(FILES_PREFIX)__malidrv_build_info.c
+
+kernelrelease:
+ $(MAKE) -C $(KDIR) kernelrelease
+
+# end of outside kernel build system block
+endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common
new file mode 100644
index 00000000000..a8f4189d0cf
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.common
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+OSKFILES=\
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_atomics.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_irq.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_locks.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_low_level_mem.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_math.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_memory.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_misc.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_mali.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_notification.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_time.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_timers.c
+
+UKKFILES=\
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_mem.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_gp.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_pp.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_core.c
+
+ifeq ($(USING_PROFILING),1)
+UKKFILES+=\
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_profiling.c
+endif
+
+ifeq ($(MALI_PLATFORM_FILE),)
+MALI_PLATFORM_FILE=platform/default/mali_platform.c
+endif
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+SVN_REV := $(shell (cd $(DRIVER_DIR); (svnversion | grep -qvE '(exported|Unversioned)' && svnversion) || git svn info | grep '^Revision: '| sed -e 's/^Revision: //' ) 2>/dev/null )
+ifeq ($(SVN_REV),)
+SVN_REV := $(MALI_RELEASE_NAME)
+else
+SVN_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+endif
+
+# Common version-string, will be extended by OS-specific sections
+VERSION_STRINGS =
+VERSION_STRINGS += CONFIG=$(CONFIG)
+VERSION_STRINGS += USING_OS_MEMORY=$(USING_OS_MEMORY)
+VERSION_STRINGS += API_VERSION=$(shell cd $(DRIVER_DIR); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)common\/mali_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'URL: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += REVISION=$(SVN_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Rev: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += CHANGE_DATE=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Date: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += BUILD_DATE=$(shell date)
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform
new file mode 100644
index 00000000000..1f8068e25de
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/Makefile.platform
@@ -0,0 +1,45 @@
+#these are the build options for the ST-Ericsson Ux500 platforms.
+
+ifeq ($(CONFIG_UX500_SOC_DB5500),y)
+TARGET_PLATFORM = default
+USING_GPU_UTILIZATION = 0
+DEFINES += -DSOC_DB5500=1
+endif
+
+CONFIG = ux500
+CPU = $(CONFIG)
+TARGET_PLATFORM ?= $(CONFIG)
+ARCH ?= arm
+USING_MMU ?= 1
+USING_PMM ?= 1
+USING_UMP ?= 1
+USING_HWMEM ?= 1
+USING_OS_MEMORY ?= 1
+USING_GPU_UTILIZATION ?= 1
+
+ifeq ($(CONFIG_GPU_MALI_DEBUG),y)
+BUILD ?= debug
+else
+BUILD ?= release
+endif
+
+KDIR-$(CPU)=$(srctree)
+
+#these are paths relative to the mali400ko/driver/src/devicedrv/mali folder
+#not to be confused with the drivers/gpu/mali symlink in the kernel tree
+EXTRA_CFLAGS += -I$(realpath $(DRIVER_DIR)/../../../include/ump)
+EXTRA_CFLAGS += -I$(realpath $(DRIVER_DIR)/../ump/common)
+
+#The following is duplicated from the main Makefile to ensure that the 'arch'
+#link is created even during an in-kernel build.
+
+# Validate selected config
+ifneq ($(shell [ -d $(DRIVER_DIR)/arch-$(CONFIG) ] && [ -f $(DRIVER_DIR)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(shell pwd))
+$(error No configuration found for config $(CONFIG). Check that $(DRIVER_DIR)/arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L $(DRIVER_DIR)/arch ] && rm $(DRIVER_DIR)/arch)
+$(shell ln -sf $(DRIVER_DIR)/arch-$(CONFIG) $(DRIVER_DIR)/arch)
+$(shell touch $(DRIVER_DIR)/arch/config.h)
+endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h
new file mode 100644
index 00000000000..5011c97a28c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m300/config.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = PMU,
+ .description = "Mali-300 PMU",
+ .base = 0xC0002000,
+ .irq = -1,
+ .mmu_id = 0
+
+ },
+ {
+ .type = MALI300GP,
+ .description = "Mali-300 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI300PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-300 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-300 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-300 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI300L2,
+ .base = 0xC0001000,
+ .description = "Mali-300 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h
new file mode 100644
index 00000000000..ee53f49bfe0
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-direct/config.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+
+ {
+ .type = PMU,
+ .description = "Mali-400 PMU",
+ .base = 0xC0002000,
+ .irq = -1,
+ .mmu_id = 0
+ },
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = OS_MEMORY,
+ .description = "OS Memory",
+ .alloc_order = 10, /* Lowest preference for this memory */
+ .size = 96 * 1024 * 1024, /* 96 MB */
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = 0,
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0x80000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h
new file mode 100644
index 00000000000..5c7c7f3ef13
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1-pmu/config.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = PMU,
+ .description = "Mali-400 PMU",
+ .base = 0xC0002000,
+ .irq = -1,
+ .mmu_id = 0
+
+ },
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h
new file mode 100644
index 00000000000..9f675fd1d23
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-1/config.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h
new file mode 100644
index 00000000000..cc1f05cb5f4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-2/config.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000A000,
+ .irq = -1,
+ .description = "Mali-400 PP 1",
+ .mmu_id = 3
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = -1,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MMU,
+ .base = 0xC0005000,
+ .irq = -1,
+ .description = "Mali-400 MMU for PP 1",
+ .mmu_id = 3
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h
new file mode 100644
index 00000000000..a672e7d22ef
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-3/config.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000A000,
+ .irq = -1,
+ .description = "Mali-400 PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000C000,
+ .irq = -1,
+ .description = "Mali-400 PP 2",
+ .mmu_id = 4
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = 102,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MMU,
+ .base = 0xC0005000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MMU,
+ .base = 0xC0006000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 2",
+ .mmu_id = 4
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h
new file mode 100644
index 00000000000..ae9100c1ae4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-pb-virtex5-m400-4/config.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the PB platform with ZBT memory enabled */
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = 0xC0000000,
+ .irq = -1,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc0008000,
+ .irq = -1,
+ .description = "Mali-400 PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000A000,
+ .irq = -1,
+ .description = "Mali-400 PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000C000,
+ .irq = -1,
+ .description = "Mali-400 PP 2",
+ .mmu_id = 4
+ },
+ {
+ .type = MALI400PP,
+ .base = 0xc000E000,
+ .irq = -1,
+ .description = "Mali-400 PP 3",
+ .mmu_id = 5
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = 0xC0003000,
+ .irq = 102,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = 0xC0004000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MMU,
+ .base = 0xC0005000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MMU,
+ .base = 0xC0006000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 2",
+ .mmu_id = 4
+ },
+ {
+ .type = MMU,
+ .base = 0xC0007000,
+ .irq = 102,
+ .description = "Mali-400 MMU for PP 3",
+ .mmu_id = 5
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM remapped to baseboard",
+ .cpu_usage_adjust = -0x50000000,
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = 0xD0000000,
+ .size = 0x10000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEMORY,
+ .description = "Mali ZBT",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .base = 0xe1000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0xe0000000,
+ .size = 0x01000000,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = 0xC0001000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h
new file mode 100644
index 00000000000..dc324cf0d39
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/arch-ux500/config.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Architecture configuration for ST-Ericsson Ux500 platforms
+ *
+ * Author: Magnus Wendt <magnus.wendt@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#ifndef __ARCH_UX500_CONFIG_H__
+#define __ARCH_UX500_CONFIG_H__
+
+/* #include <mach/hardware.h> */
+
+#define U5500_SGA_BASE 0x801D0000
+#define U8500_SGA_BASE 0xA0300000
+
+#if defined(SOC_DB5500) && (1 == SOC_DB5500)
+#define UX500_SGA_BASE U5500_SGA_BASE
+#else
+#define UX500_SGA_BASE U8500_SGA_BASE
+#endif
+
+#define MEGABYTE (1024*1024)
+#define MALI_MEM_BASE (128 * MEGABYTE)
+#define MALI_MEM_SIZE ( 32 * MEGABYTE)
+#define OS_MEM_SIZE (128 * MEGABYTE)
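+/* With these values the dedicated Mali region spans physical addresses
+ * 0x08000000-0x09FFFFFF; OS_MEM_SIZE is only used when USING_OS_MEMORY is
+ * set (see the OS_MEMORY resource below). */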
+
+/* Hardware revision u8500 v1: GX570-BU-00000-r0p1
+ * Hardware revision u8500 v2: GX570-BU-00000-r1p0
+ * Hardware revision u5500: GX570-BU-00000-r1p0
+ * configuration registers: 0xA0300000-0xA031FFFF (stw8500v1_usermanual p269)
+ *
+ * Shared Peripheral Interrupt assignments: (stw8500v1_usermanual p265-266)
+ * Nb | Interrupt Source
+ * 116 | Mali400 combined
+ * 115 | Mali400 geometry processor
+ * 114 | Mali400 geometry processor MMU
+ * 113 | Mali400 pixel processor
+ * 112 | Mali400 pixel processor MMU
+ *
+ * irq offset: 32
+ */
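+
+/* The +32 added to each .irq value below converts the SPI numbers in the
+ * table above into GIC interrupt IDs: IDs 0-31 are reserved for per-CPU
+ * interrupts, so shared peripheral interrupts start at ID 32. */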
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = UX500_SGA_BASE + 0x0000,
+ .irq = 115+32,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = UX500_SGA_BASE + 0x8000,
+ .irq = 113+32,
+ .description = "Mali-400 PP",
+ .mmu_id = 2
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = UX500_SGA_BASE + 0x3000,
+ .irq = 114+32,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = UX500_SGA_BASE + 0x4000,
+ .irq = 112+32,
+ .description = "Mali-400 MMU for PP",
+ .mmu_id = 2
+ },
+#endif
+ {
+ .type = MEMORY,
+ .description = "Mali SDRAM",
+ .alloc_order = 0, /* Highest preference for this memory */
+ .base = MALI_MEM_BASE,
+ .size = MALI_MEM_SIZE,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+#if USING_OS_MEMORY
+ {
+ .type = OS_MEMORY,
+ .description = "Linux kernel memory",
+ .alloc_order = 5, /* Medium preference for this memory */
+ .size = OS_MEM_SIZE,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_MMU_READABLE | _MALI_MMU_WRITEABLE
+ },
+#endif
+ {
+ .type = MEM_VALIDATION,
+ .description = "Framebuffer",
+ .base = 0x00000000, /* Validate all memory for now */
+ .size = 2047 * MEGABYTE, /* "2GB ought to be enough for anyone" */
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_WRITEABLE | _MALI_PP_READABLE
+ },
+ {
+ .type = MALI400L2,
+ .base = UX500_SGA_BASE + 0x1000,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_UX500_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c
new file mode 100644
index 00000000000..fc80a20c813
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_osk.h"
+
+#define MALI_BLOCK_SIZE (256UL * 1024UL) /* 256 kB, remember to keep the ()s */
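+/* MALI_BLOCK_SIZE must remain a power of two: the masks used below, e.g.
+ * size & ~(MALI_BLOCK_SIZE - 1) and *offset & (MALI_BLOCK_SIZE - 1), rely on
+ * that to round down to block granularity and to extract sub-block offsets. */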
+
+typedef struct block_info
+{
+ struct block_info * next;
+} block_info;
+
+/* The structure used as the handle produced by block_allocator_allocate,
+ * and removed by block_allocator_release */
+typedef struct block_allocator_allocation
+{
+ /* The list will be released in reverse order */
+ block_info *last_allocated;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 start_offset;
+ u32 mapping_length;
+} block_allocator_allocation;
+
+
+typedef struct block_allocator
+{
+ _mali_osk_lock_t *mutex;
+ block_info * all_blocks;
+ block_info * first_free;
+ u32 base;
+ u32 cpu_usage_adjust;
+ u32 num_blocks;
+} block_allocator;
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block);
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static void block_allocator_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block );
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ block_allocator * info;
+ u32 usable_size;
+ u32 num_blocks;
+
+ usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+ MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+ MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+ num_blocks = usable_size / MALI_BLOCK_SIZE;
+ MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+ if (usable_size == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+ return NULL;
+ }
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(block_allocator));
+ if (NULL != info)
+ {
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED, 0, 105);
+ if (NULL != info->mutex)
+ {
+ info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
+ if (NULL != info->all_blocks)
+ {
+ u32 i;
+ info->first_free = NULL;
+ info->num_blocks = num_blocks;
+
+ info->base = base_address;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
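+ /* Thread every block onto the free list; the list is built back to
+ front, so first_free ends up pointing at the last array element. */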
+ for ( i = 0; i < num_blocks; i++)
+ {
+ info->all_blocks[i].next = info->first_free;
+ info->first_free = &info->all_blocks[i];
+ }
+
+ allocator->allocate = block_allocator_allocate;
+ allocator->allocate_page_table_block = block_allocator_allocate_page_table_block;
+ allocator->destroy = block_allocator_destroy;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ _mali_osk_lock_term(info->mutex);
+ }
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ block_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (block_allocator*)allocator->ctx;
+
+ _mali_osk_free(info->all_blocks);
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
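+/* A block's physical address follows directly from its index in all_blocks:
+ * for example, with base = 0xD0000000, block index 3 maps to
+ * 0xD0000000 + 3 * 256 kB = 0xD00C0000. */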
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block)
+{
+ return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE);
+}
+
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ block_allocator * info;
+ u32 left;
+ block_info * last_allocated = NULL;
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ block_allocator_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (block_allocator*)ctx;
+ left = descriptor->size - *offset;
+ MALI_DEBUG_ASSERT(0 != left);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ /* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return result;
+ }
+
+ ret_allocation->start_offset = *offset;
+ ret_allocation->mapping_length = 0;
+
+ while ((left > 0) && (info->first_free))
+ {
+ block_info * block;
+ u32 phys_addr;
+ u32 padding;
+ u32 current_mapping_size;
+
+ block = info->first_free;
+ info->first_free = info->first_free->next;
+ block->next = last_allocated;
+ last_allocated = block;
+
+ phys_addr = get_phys(info, block);
+
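+ /* Keep the Mali-side mapping congruent with *offset: when *offset is not
+ block aligned, map from the matching sub-block offset and use at most
+ the remainder of this 256 kB block. */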
+ padding = *offset & (MALI_BLOCK_SIZE-1);
+
+ if (MALI_BLOCK_SIZE - padding < left)
+ {
+ current_mapping_size = MALI_BLOCK_SIZE - padding;
+ }
+ else
+ {
+ current_mapping_size = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+ /* release all memory back to the pool */
+ while (last_allocated)
+ {
+ /* This relinks every block we've just allocated back into the free-list */
+ block = last_allocated->next;
+ last_allocated->next = info->first_free;
+ info->first_free = last_allocated;
+ last_allocated = block;
+ }
+
+ break;
+ }
+
+ *offset += current_mapping_size;
+ left -= current_mapping_size;
+ ret_allocation->mapping_length += current_mapping_size;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ if (last_allocated)
+ {
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Record all the information about this allocation */
+ ret_allocation->last_allocated = last_allocated;
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+
+ alloc_info->ctx = info;
+ alloc_info->handle = ret_allocation;
+ alloc_info->release = block_allocator_release;
+ }
+ else
+ {
+ /* Free the allocation information - nothing to be passed back */
+ _mali_osk_free( ret_allocation );
+ }
+
+ return result;
+}
+
+static void block_allocator_release(void * ctx, void * handle)
+{
+ block_allocator * info;
+ block_info * block, * next;
+ block_allocator_allocation *allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(handle);
+
+ info = (block_allocator*)ctx;
+ allocation = (block_allocator_allocation*)handle;
+ block = allocation->last_allocated;
+
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ /* unmap */
+ mali_allocation_engine_unmap_physical(allocation->engine, allocation->descriptor, allocation->start_offset, allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+ while (block)
+ {
+ MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+ next = block->next;
+
+ /* relink into free-list */
+ block->next = info->first_free;
+ info->first_free = block;
+
+ /* advance the loop */
+ block = next;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_free( allocation );
+}
+
+
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+ block_allocator * info;
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(block);
+ info = (block_allocator*)ctx;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ if (NULL != info->first_free)
+ {
+ void * virt;
+ u32 phys;
+ u32 size;
+ block_info * alloc;
+ alloc = info->first_free;
+
+ phys = get_phys(info, alloc); /* Does not modify info or alloc */
+ size = MALI_BLOCK_SIZE; /* Must be multiple of MALI_MMU_PAGE_SIZE */
+ virt = _mali_osk_mem_mapioregion( phys, size, "Mali block allocator page tables" );
+
+ /* Failure of _mali_osk_mem_mapioregion will result in MALI_MEM_ALLOC_INTERNAL_FAILURE,
+ * because it's unlikely another allocator would be able to map it in either. */
+
+ if ( NULL != virt )
+ {
+ block->ctx = info; /* same as incoming ctx */
+ block->handle = alloc;
+ block->phys_base = phys;
+ block->size = size;
+ block->release = block_allocator_release_page_table_block;
+ block->mapping = virt;
+
+ info->first_free = alloc->next;
+
+ alloc->next = NULL; /* Could potentially link many blocks together instead */
+
+ result = MALI_MEM_ALLOC_FINISHED;
+ }
+ }
+ else result = MALI_MEM_ALLOC_NONE;
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block )
+{
+ block_allocator * info;
+ block_info * block, * next;
+
+ MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+ info = (block_allocator*)page_table_block->ctx;
+ block = (block_info*)page_table_block->handle;
+
+ MALI_DEBUG_ASSERT_POINTER(info);
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ /* Unmap all the physical memory at once */
+ _mali_osk_mem_unmapioregion( page_table_block->phys_base, page_table_block->size, page_table_block->mapping );
+
+ /** @note This loop handles the case where more than one block_info was linked.
+ * Probably unnecessary for page table block releasing. */
+ while (block)
+ {
+ next = block->next;
+
+ MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+ block->next = info->first_free;
+ info->first_free = block;
+
+ block = next;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h
new file mode 100644
index 00000000000..c3b5761e23a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_block_allocator.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_kernel_memory_engine.h"
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c
new file mode 100644
index 00000000000..e91abe120cb
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_GP2.c
@@ -0,0 +1,1418 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_subsystem.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#if defined(USING_MALI400_L2_CACHE)
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h" /* Needed for mali_kernel_mmu_force_bus_reset() */
+#endif
+
+#if defined(USING_MALI200)
+#define MALI_GP_SUBSYSTEM_NAME "MaliGP2"
+#define MALI_GP_CORE_TYPE _MALI_GP2
+#elif defined(USING_MALI400)
+#define MALI_GP_SUBSYSTEM_NAME "Mali-400 GP"
+#define MALI_GP_CORE_TYPE _MALI_400_GP
+#else
+#error "No supported mali core defined"
+#endif
+
+#define GET_JOB_EMBEDDED_PTR(job) (&((job)->embedded_core_job))
+#define GET_JOBGP2_PTR(job_extern) _MALI_OSK_CONTAINER_OF(job_extern, maligp_job, embedded_core_job)
+
+/* Set when this subsystem is initialized. The value is determined by this
+ * subsystem's position in the subsystems[] array in mali_kernel_core.c,
+ * which is fixed at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_gp_id = -1;
+
+static mali_core_renderunit * last_gp_core_cookie = NULL;
+
+/* Describes the settings of a maligp job */
+typedef struct maligp_job
+{
+ /* The general job struct common for all mali cores */
+ mali_core_job embedded_core_job;
+ _mali_uk_gp_start_job_s user_input;
+
+ u32 irq_status;
+ u32 status_reg_on_stop;
+ u32 perf_counter0;
+ u32 perf_counter1;
+ u32 vscl_stop_addr;
+ u32 plbcl_stop_addr;
+ u32 heap_current_addr;
+
+ /* The data we will return back to the user */
+ _mali_osk_notification_t *notification_obj;
+
+ int is_stalled_waiting_for_more_memory;
+
+ u32 active_mask;
+ /* progress checking */
+ u32 last_vscl;
+ u32 last_plbcl;
+ /* extended progress checking, only enabled when we can use one of the performance counters */
+ u32 have_extended_progress_checking;
+ u32 vertices;
+
+#if defined(USING_MALI400_L2_CACHE)
+ u32 perf_counter_l2_src0;
+ u32 perf_counter_l2_src1;
+ u32 perf_counter_l2_val0;
+ u32 perf_counter_l2_val1;
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ u32 pid;
+ u32 tid;
+#endif
+} maligp_job;
+
+/* Functions exposed to the general external system through
+ function pointers. */
+
+static _mali_osk_errcode_t maligp_subsystem_startup(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static _mali_osk_errcode_t maligp_subsystem_mmu_connect(mali_kernel_subsystem_identifier id);
+#endif
+static void maligp_subsystem_terminate(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t maligp_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+static void maligp_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+static _mali_osk_errcode_t maligp_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t maligp_renderunit_create(_mali_osk_resource_t * resource);
+#if USING_MMU
+static void maligp_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+#if MALI_STATE_TRACKING
+void maligp_subsystem_dump_state(void);
+#endif
+
+/* Internal support functions */
+static _mali_osk_errcode_t maligp_core_version_legal( mali_core_renderunit *core );
+static void maligp_raw_reset( mali_core_renderunit *core);
+static void maligp_reset_hard(struct mali_core_renderunit * core);
+static void maligp_reset(mali_core_renderunit *core);
+static void maligp_initialize_registers_mgmt(mali_core_renderunit *core );
+
+#ifdef DEBUG
+static void maligp_print_regs(int debug_level, mali_core_renderunit *core);
+#endif
+
+/* Functions exposed to the mali_core system through function pointers
+ in the subsystem struct. */
+static _mali_osk_errcode_t subsystem_maligp_start_job(mali_core_job * job, mali_core_renderunit * core);
+static u32 subsystem_maligp_irq_handler_upper_half(mali_core_renderunit * core);
+static int subsystem_maligp_irq_handler_bottom_half(mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_maligp_get_new_job_from_user(struct mali_core_session * session, void * argument);
+static _mali_osk_errcode_t subsystem_maligp_suspend_response(struct mali_core_session * session, void * argument);
+static void subsystem_maligp_return_job_to_user(mali_core_job * job, mali_subsystem_job_end_code end_status);
+static void subsystem_maligp_renderunit_delete(mali_core_renderunit * core);
+static void subsystem_maligp_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style );
+static void subsystem_maligp_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_maligp_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core);
+static void subsystem_maligp_renderunit_stop_bus(struct mali_core_renderunit* core);
+
+/* Variables */
+static register_address_and_value default_mgmt_regs[] =
+{
+ { MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED }
+};
+
+
+/* This will be one of the subsystems in the array of subsystems:
+ static struct mali_kernel_subsystem * subsystems[];
+ found in file: mali_kernel_core.c
+*/
+
+struct mali_kernel_subsystem mali_subsystem_gp2=
+{
+ maligp_subsystem_startup, /* startup */
+ maligp_subsystem_terminate, /* shutdown */
+#if USING_MMU
+ maligp_subsystem_mmu_connect, /* load_complete */
+#else
+ NULL,
+#endif
+ maligp_subsystem_core_system_info_fill, /* system_info_fill */
+ maligp_subsystem_session_begin, /* session_begin */
+ maligp_subsystem_session_end, /* session_end */
+#if USING_MMU
+ maligp_subsystem_broadcast_notification, /* broadcast_notification */
+#else
+ NULL,
+#endif
+#if MALI_STATE_TRACKING
+ maligp_subsystem_dump_state, /* dump_state */
+#endif
+} ;
+
+static mali_core_subsystem subsystem_maligp ;
+
+static _mali_osk_errcode_t maligp_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem * subsystem;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_startup\n") ) ;
+
+ mali_subsystem_gp_id = id;
+
+ /* All values get 0 as default */
+ _mali_osk_memset(&subsystem_maligp, 0, sizeof(subsystem_maligp));
+
+ subsystem = &subsystem_maligp;
+ subsystem->start_job = &subsystem_maligp_start_job;
+ subsystem->irq_handler_upper_half = &subsystem_maligp_irq_handler_upper_half;
+ subsystem->irq_handler_bottom_half = &subsystem_maligp_irq_handler_bottom_half;
+ subsystem->get_new_job_from_user = &subsystem_maligp_get_new_job_from_user;
+ subsystem->suspend_response = &subsystem_maligp_suspend_response;
+ subsystem->return_job_to_user = &subsystem_maligp_return_job_to_user;
+ subsystem->renderunit_delete = &subsystem_maligp_renderunit_delete;
+ subsystem->reset_core = &subsystem_maligp_renderunit_reset_core;
+ subsystem->stop_bus = &subsystem_maligp_renderunit_stop_bus;
+ subsystem->probe_core_irq_trigger = &subsystem_maligp_renderunit_probe_core_irq_trigger;
+ subsystem->probe_core_irq_acknowledge = &subsystem_maligp_renderunit_probe_core_irq_finished;
+
+ /* Setting variables in the general core part of the subsystem.*/
+ subsystem->name = MALI_GP_SUBSYSTEM_NAME;
+ subsystem->core_type = MALI_GP_CORE_TYPE;
+ subsystem->id = id;
+
+ /* Initiates the rest of the general core part of the subsystem */
+ MALI_CHECK_NO_ERROR(mali_core_subsystem_init( subsystem ));
+
+ /* This will register the function for adding MALIGP2 cores to the subsystem */
+#if defined(USING_MALI200)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALIGP2, maligp_renderunit_create));
+#endif
+#if defined(USING_MALI400)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI400GP, maligp_renderunit_create));
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_startup\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+#if USING_MMU
+static _mali_osk_errcode_t maligp_subsystem_mmu_connect(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem_attach_mmu(&subsystem_maligp);
+ MALI_SUCCESS; /* OK */
+}
+#endif
+
+static void maligp_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_terminate\n") ) ;
+ mali_core_subsystem_cleanup(&subsystem_maligp);
+}
+
+static _mali_osk_errcode_t maligp_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ mali_core_session * session;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_session_begin\n") ) ;
+ MALI_CHECK_NON_NULL(session = _mali_osk_malloc( sizeof(*session) ), _MALI_OSK_ERR_FAULT);
+
+ _mali_osk_memset(session, 0, sizeof(*session) );
+ *slot = (mali_kernel_subsystem_session_slot)session;
+
+ session->subsystem = &subsystem_maligp;
+
+ session->notification_queue = queue;
+
+#if USING_MMU
+ session->mmu_session = mali_session_data;
+#endif
+
+ mali_core_session_begin(session);
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_session_begin\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+static void maligp_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ mali_core_session * session;
+ /** @note mali_session_data not needed here */
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_session_end\n") ) ;
+ if ( NULL==slot || NULL==*slot)
+ {
+ MALI_PRINT_ERROR(("Input slot==NULL"));
+ return;
+ }
+ session = (mali_core_session *)*slot;
+ mali_core_session_close(session);
+
+ _mali_osk_free(session);
+ *slot = NULL;
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_session_end\n") ) ;
+}
+
+/**
+ * We fill in info about all the cores we have
+ * @param info Pointer to system info struct to update
+ * @return _MALI_OSK_ERR_OK on success, or another _mali_osk_errcode_t for errors.
+ */
+static _mali_osk_errcode_t maligp_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+ return mali_core_subsystem_system_info_fill(&subsystem_maligp, info);
+}
+
+static _mali_osk_errcode_t maligp_renderunit_create(_mali_osk_resource_t * resource)
+{
+ mali_core_renderunit *core;
+ int err;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_renderunit_create\n") ) ;
+ /* Checking that the resource settings are correct */
+#if defined(USING_MALI200)
+ if(MALIGP2 != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_GP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#elif defined(USING_MALI400)
+ if(MALI400GP != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_GP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+ if ( 0 != resource->size )
+ {
+ MALI_PRINT_ERROR(("Memory size set to " MALI_GP_SUBSYSTEM_NAME " core should be zero."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if ( NULL == resource->description )
+ {
+ MALI_PRINT_ERROR(("A " MALI_GP_SUBSYSTEM_NAME " core needs a unique description field"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Create a new core object */
+ core = (mali_core_renderunit*) _mali_osk_malloc(sizeof(*core));
+ if ( NULL == core )
+ {
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Variables set to be able to open and register the core */
+ core->subsystem = &subsystem_maligp ;
+ core->registers_base_addr = resource->base ;
+ core->size = MALIGP2_REGISTER_ADDRESS_SPACE_SIZE ;
+ core->description = resource->description;
+ core->irq_nr = resource->irq ;
+#if USING_MMU
+ core->mmu_id = resource->mmu_id;
+ core->mmu = NULL;
+#endif
+#if USING_MALI_PMM
+ /* Set up core's PMM id */
+ core->pmm_id = MALI_PMM_CORE_GP;
+#endif
+
+ err = mali_core_renderunit_init( core );
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to initialize renderunit\n"));
+ goto exit_on_error0;
+ }
+
+ /* Map the new core object, setting: core->registers_mapped */
+ err = mali_core_renderunit_map_registers(core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error1;
+
+ /* Check that the register mapping of the core works by reading back the
+ version register (a benchmark build fakes the value instead). */
+ if (mali_benchmark) {
+ core->core_version = MALI_GP_PRODUCT_ID << 16;
+ } else {
+ core->core_version = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VERSION);
+ }
+
+ err = maligp_core_version_legal(core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error2;
+
+ /* Reset the core. Put the core into a state where it can start to render. */
+ maligp_reset(core);
+
+ /* Registering IRQ, init the work_queue_irq_handle */
+ /* Adding this core as an available renderunit in the subsystem. */
+ err = mali_core_subsystem_register_renderunit(&subsystem_maligp, core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error2;
+
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Initial Register settings:\n"));
+ maligp_print_regs(4, core);
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_renderunit_create\n") ) ;
+
+ MALI_SUCCESS;
+
+exit_on_error2:
+ mali_core_renderunit_unmap_registers(core);
+exit_on_error1:
+ mali_core_renderunit_term(core);
+exit_on_error0:
+ _mali_osk_free( core ) ;
+ MALI_PRINT_ERROR(("Renderunit NOT created."));
+ MALI_ERROR((_mali_osk_errcode_t)err);
+}
+
+#if USING_MMU
+/* Used currently only for signalling when MMU has a pagefault */
+static void maligp_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+ mali_core_subsystem_broadcast_notification(&subsystem_maligp, message, data);
+}
+#endif
+
+#ifdef DEBUG
+static void maligp_print_regs(int debug_level, mali_core_renderunit *core)
+{
+ if (debug_level <= mali_debug_level)
+ {
+ MALI_DEBUG_PRINT(1, (" VS 0x%08X 0x%08X, PLBU 0x%08X 0x%08X ALLOC 0x%08X 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR))
+ );
+ MALI_DEBUG_PRINT(1, (" IntRaw 0x%08X IntMask 0x%08X, Status 0x%02X Ver: 0x%08X \n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_MASK),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VERSION)));
+
+ MALI_DEBUG_PRINT(1, (" PERF_CNT Enbl:%d %d Src: %02d %02d VAL: 0x%08X 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+
+ MALI_DEBUG_PRINT(1, (" VS_START 0x%08X PLBU_START 0x%08X AXI_ERR 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ),
+ mali_core_renderunit_register_read(core, MALIGP2_CONTR_AXI_BUS_ERROR_STAT)));
+ }
+}
+#endif
+
+static _mali_osk_errcode_t maligp_core_version_legal( mali_core_renderunit *core )
+{
+ u32 mali_type;
+
+ mali_type = core->core_version >> 16;
+
+#if defined(USING_MALI400)
+ if ( MALI400_GP_PRODUCT_ID != mali_type && MALI300_GP_PRODUCT_ID != mali_type )
+#else
+ if ( MALI_GP_PRODUCT_ID != mali_type )
+#endif
+ {
+ MALI_PRINT_ERROR(("Error: reading this from maligp version register: 0x%x\n", core->core_version));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali GP: core_version_legal: Reads correct mali version: %d\n", core->core_version )) ;
+ MALI_SUCCESS;
+}
+
+static void subsystem_maligp_renderunit_stop_bus(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+static void maligp_reset( mali_core_renderunit *core )
+{
+ if (!mali_benchmark) {
+ maligp_raw_reset(core);
+ maligp_initialize_registers_mgmt(core);
+ }
+}
+
+
+static void maligp_reset_hard( mali_core_renderunit *core )
+{
+ const int reset_finished_loop_count = 15;
+ const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_invalid_value);
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
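+ /* Completion is detected by probing a scratch register: the check value
+ written below only reads back once the core has come out of reset. */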
+ for (i = 0; i < reset_finished_loop_count; i++)
+ {
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_core_renderunit_register_read(core, reset_wait_target_register))
+ {
+ MALI_DEBUG_PRINT(5, ("Reset loop exiting after %d iterations\n", i));
+ break;
+ }
+ }
+
+ if (i == reset_finished_loop_count)
+ {
+ MALI_DEBUG_PRINT(1, ("The reset loop didn't work\n"));
+ }
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+
+}
+
+static void maligp_raw_reset( mali_core_renderunit *core )
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: maligp_raw_reset: %s\n", core->description)) ;
+ if (mali_benchmark) return;
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+
+#if defined(USING_MALI200)
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, request_loop_count == i, ("Mali GP: Bus was never stopped during core reset\n"));
+
+ if (request_loop_count==i)
+ {
+ /* Could not stop bus connections from the core, probably because one of the
+ already pending bus requests has hit a page fault and therefore cannot
+ complete before the MMU finishes its page fault handling. Fall back to a
+ heavier reset, which unfortunately resets all the cores on this MMU in
+ addition to the MMU itself */
+#if USING_MMU
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ MALI_DEBUG_PRINT(1, ("Mali GP: Forcing MMU bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow GP to stop its bus, system failure, unable to recover\n"));
+ return;
+ }
+
+ /* the bus was stopped OK, complete the reset */
+ /* use the hard reset routine to do the actual reset */
+ maligp_reset_hard(core);
+
+#elif defined(USING_MALI400)
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & /*Bitwise OR*/
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if ( request_loop_count==i )
+ {
+#if USING_MMU
+ /* Could not stop bus connections from the core, probably because one of the
+ already pending bus requests has hit a page fault and therefore cannot
+ complete before the MMU finishes its page fault handling. Fall back to a
+ heavier reset, which unfortunately resets all the cores on this MMU in
+ addition to the MMU itself */
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ MALI_DEBUG_PRINT(1, ("Mali GP: Forcing Bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow GP to stop its bus, system failure, unable to recover\n"));
+ }
+ else
+ {
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ }
+
+#else
+#error "no supported mali core defined"
+#endif
+}
+
+/* Sets the registers on maligp according to the const default_mgmt_regs array. */
+static void maligp_initialize_registers_mgmt(mali_core_renderunit *core )
+{
+ int i;
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_initialize_registers_mgmt: %s\n", core->description)) ;
+ for(i=0 ; i< (sizeof(default_mgmt_regs)/sizeof(*default_mgmt_regs)) ; ++i)
+ {
+ mali_core_renderunit_register_write(core, default_mgmt_regs[i].address, default_mgmt_regs[i].value);
+ }
+}
+
+
+/* Start this job on this core. Return MALI_TRUE if the job was started. */
+static _mali_osk_errcode_t subsystem_maligp_start_job(mali_core_job * job, mali_core_renderunit * core)
+{
+ maligp_job *jobgp;
+ u32 startcmd;
+ /* The local extended version of the general structs */
+ jobgp = _MALI_OSK_CONTAINER_OF(job, maligp_job, embedded_core_job);
+
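+ /* frame_registers[0..3] hold the VS and PLBU command list start/end
+ addresses (written to the VSCL/PLBUCL registers below); an empty list
+ (start == end) means that processing unit is left unstarted. */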
+ startcmd = 0;
+ if ( jobgp->user_input.frame_registers[0] != jobgp->user_input.frame_registers[1] )
+ {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+ }
+
+ if ( jobgp->user_input.frame_registers[2] != jobgp->user_input.frame_registers[3] )
+ {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+ }
+
+ if(0 == startcmd)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali GP: Job: 0x%08x WILL NOT START SINCE JOB HAS ILLEGAL ADDRESSES\n",
+ (u32)jobgp->user_input.user_job_ptr));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Registers Start\n"));
+ maligp_print_regs(4, core);
+#endif
+
+
+ mali_core_renderunit_register_write_array(
+ core,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR,
+ &(jobgp->user_input.frame_registers[0]),
+ sizeof(jobgp->user_input.frame_registers)/sizeof(jobgp->user_input.frame_registers[0]));
+
+ /* This selects which performance counters we are reading */
+ if ( 0 != jobgp->user_input.perf_counter_flag )
+ {
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC,
+ jobgp->user_input.perf_counter_src0);
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+ jobgp->user_input.perf_counter_src1);
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if ( jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ int force_reset = ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET ) ? 1 : 0;
+ u32 src0 = 0;
+ u32 src1 = 0;
+
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE )
+ {
+ src0 = jobgp->user_input.perf_counter_l2_src0;
+ }
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE )
+ {
+ src1 = jobgp->user_input.perf_counter_l2_src1;
+ }
+
+ mali_kernel_l2_cache_set_perf_counters(src0, src1, force_reset); /* will activate and possibly reset counters */
+
+ /* Now, retrieve the current values, so we can subtract them when the job has completed */
+ mali_kernel_l2_cache_get_perf_counters(&jobgp->perf_counter_l2_src0,
+ &jobgp->perf_counter_l2_val0,
+ &jobgp->perf_counter_l2_src1,
+ &jobgp->perf_counter_l2_val1);
+ }
+#endif
+ }
+
+ if ( 0 == (jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))
+ {
+ /* extended progress checking can be enabled */
+
+ jobgp->have_extended_progress_checking = 1;
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED
+ );
+
+ mali_core_renderunit_register_write(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ subsystem_flush_mapped_mem_cache();
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: STARTING GP WITH CMD: 0x%x\n", startcmd));
+
+ /* This is the command that starts the Core */
+ mali_core_renderunit_register_write(core,
+ MALIGP2_REG_ADDR_MGMT_CMD,
+ startcmd);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), jobgp->pid, jobgp->tid, 0, 0, 0);
+#endif
+
+ MALI_SUCCESS;
+}
+
+/* Check if given core has an interrupt pending. Return MALI_TRUE and set mask to 0 if pending */
+
+static u32 subsystem_maligp_irq_handler_upper_half(mali_core_renderunit * core)
+{
+ u32 irq_readout;
+
+ if (mali_benchmark) {
+ return (core->current_job ? 1 : 0); /* simulate irq is pending when a job is pending */
+ }
+
+ irq_readout = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+
+ MALI_DEBUG_PRINT(5, ("Mali GP: IRQ: %04x\n", irq_readout)) ;
+
+ if ( MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout )
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK , MALIGP2_REG_VAL_IRQ_MASK_NONE);
+ /* We do need to handle this in a bottom half, return 1 */
+ return 1;
+ }
+ return 0;
+}
+
+/* This function checks whether the interrupt indicates that the job was finished.
+If so, it updates the job struct, resets the core registers, and returns MALI_TRUE.
+If the job is still running after this function, it returns MALI_FALSE.
+The function must also re-enable the bits in the interrupt mask for the core.
+Called by the bottom-half interrupt function. */
+static int subsystem_maligp_irq_handler_bottom_half(mali_core_renderunit* core)
+{
+ mali_core_job * job;
+ maligp_job * jobgp;
+ u32 irq_readout;
+ u32 core_status;
+ u32 vscl;
+ u32 plbcl;
+
+ job = core->current_job;
+
+ if (mali_benchmark) {
+ MALI_DEBUG_PRINT(3, ("MaliGP: Job: Benchmark\n") );
+ irq_readout = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+ core_status = 0;
+ } else {
+ irq_readout = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+ core_status = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS);
+ }
+
+ if (NULL == job)
+ {
+ MALI_DEBUG_ASSERT(CORE_IDLE==core->state);
+ if ( 0 != irq_readout )
+ {
+ MALI_PRINT_ERROR(("Interrupt from a core not running a job. IRQ: 0x%04x Status: 0x%04x", irq_readout, core_status));
+ }
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ MALI_DEBUG_ASSERT(CORE_IDLE!=core->state);
+
+ jobgp = GET_JOBGP2_PTR(job);
+
+ jobgp->heap_current_addr = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+
+ vscl = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR);
+ plbcl = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Job: 0x%08x IRQ RECEIVED Rawstat: 0x%x Status: 0x%x\n",
+ (u32)jobgp->user_input.user_job_ptr, irq_readout , core_status )) ;
+
+ jobgp->irq_status |= irq_readout;
+ jobgp->status_reg_on_stop = core_status;
+
+ if ( 0 != jobgp->is_stalled_waiting_for_more_memory )
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ /* Readback the performance counters */
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+ jobgp->perf_counter0 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ jobgp->perf_counter1 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
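+ /* u32 subtraction below yields the correct delta even if a counter
+ wrapped once between the two readings. */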
+
+ if (jobgp->perf_counter_l2_src0 == src0)
+ {
+ jobgp->perf_counter_l2_val0 = val0 - jobgp->perf_counter_l2_val0;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val0 = 0;
+ }
+
+ if (jobgp->perf_counter_l2_src1 == src1)
+ {
+ jobgp->perf_counter_l2_val1 = val1 - jobgp->perf_counter_l2_val1;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val1 = 0;
+ }
+ }
+#endif
+
+ MALI_DEBUG_PRINT(2, ("Mali GP: Job aborted - userspace would not provide more heap memory.\n"));
+ return JOB_STATUS_END_OOM; /* Core is ready for more jobs.*/
+ }
+ /* finished ? */
+ else if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Registers On job end:\n"));
+ maligp_print_regs(4, core);
+#endif
+ MALI_DEBUG_PRINT_IF(5, irq_readout & 0x04, ("OOM when done, ignoring (reg.current = 0x%x, reg.end = 0x%x)\n",
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR))
+ );
+
+
+ if (0 != jobgp->user_input.perf_counter_flag )
+ {
+ /* Readback the performance counters */
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+ jobgp->perf_counter0 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ jobgp->perf_counter1 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
+
+ if (jobgp->perf_counter_l2_src0 == src0)
+ {
+ jobgp->perf_counter_l2_val0 = val0 - jobgp->perf_counter_l2_val0;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val0 = 0;
+ }
+
+ if (jobgp->perf_counter_l2_src1 == src1)
+ {
+ jobgp->perf_counter_l2_val1 = val1 - jobgp->perf_counter_l2_val1;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val1 = 0;
+ }
+ }
+#endif
+ }
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+
+ return JOB_STATUS_END_SUCCESS; /* core idle */
+ }
+ /* sw watchdog timeout handling or time to do hang checking ? */
+ else if (
+ (CORE_WATCHDOG_TIMEOUT == core->state) ||
+ (
+ (CORE_HANG_CHECK_TIMEOUT == core->state) &&
+ (
+ (jobgp->have_extended_progress_checking ? (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE) == jobgp->vertices) : 1/*TRUE*/) &&
+ ((core_status & MALIGP2_REG_VAL_STATUS_VS_ACTIVE) ? (vscl == jobgp->last_vscl) : 1/*TRUE*/) &&
+ ((core_status & MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE) ? (plbcl == jobgp->last_plbcl) : 1/*TRUE*/)
+ )
+ )
+ )
+ {
+ /* no progress detected, killed by the watchdog */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ MALI_DEBUG_PRINT(1, ("Mali GP: SW-Timeout. Regs:\n"));
+ if (core_status & MALIGP2_REG_VAL_STATUS_VS_ACTIVE) MALI_DEBUG_PRINT(1, ("vscl current = 0x%x last = 0x%x\n", (void*)vscl, (void*)jobgp->last_vscl));
+ if (core_status & MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE) MALI_DEBUG_PRINT(1, ("plbcl current = 0x%x last = 0x%x\n", (void*)plbcl, (void*)jobgp->last_plbcl));
+ if (jobgp->have_extended_progress_checking) MALI_DEBUG_PRINT(1, ("vertices processed = %d, last = %d\n", mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE),
+ jobgp->vertices));
+#ifdef DEBUG
+ maligp_print_regs(2, core);
+#endif
+ return JOB_STATUS_END_HANG;
+ }
+ /* if hang timeout checking was enabled and we detected progress, we will fall through to this check */
+ /* check for PLBU OOM before the hang check, to avoid the race condition of the hw wd triggering while waiting for us to handle the OOM interrupt */
+ else if ( 0 != (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM))
+ {
+ mali_core_session *session;
+ _mali_osk_notification_t *notific;
+ _mali_uk_gp_job_suspended_s * suspended_job;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ session = job->session;
+
+ MALI_DEBUG_PRINT(4, ("OOM, new heap requested by GP\n"));
+ MALI_DEBUG_PRINT(4, ("Status when OOM: current = 0x%x, end = 0x%x\n",
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR))
+ );
+
+ notific = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_GP_STALLED,
+ sizeof( _mali_uk_gp_job_suspended_s )
+ );
+ if ( NULL == notific)
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not get notification object\n")) ;
+ return JOB_STATUS_END_OOM; /* Core is ready for more jobs.*/
+ }
+
+ core->state = CORE_WORKING;
+ jobgp->is_stalled_waiting_for_more_memory = 1;
+ suspended_job = (_mali_uk_gp_job_suspended_s *)notific->result_buffer; /* this is ok - result_buffer was malloc'd */
+
+ suspended_job->user_job_ptr = jobgp->user_input.user_job_ptr;
+ suspended_job->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY ;
+ suspended_job->cookie = (u32) core;
+ last_gp_core_cookie = core;
+
+ _mali_osk_notification_queue_send( session->notification_queue, notific);
+
+#ifdef DEBUG
+ maligp_print_regs(4, core);
+#endif
+
+ /* stop all active timers */
+ _mali_osk_timer_del( core->timer);
+ _mali_osk_timer_del( core->timer_hang_detection);
+ MALI_DEBUG_PRINT(4, ("Mali GP: PLBU heap empty, sending memory request to userspace\n"));
+ /* store in watchdog_jiffies the WD timeout that remained when the OOM was triggered */
+ job->watchdog_jiffies = (long)job->watchdog_jiffies - (long)_mali_osk_time_tickcount();
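+ /* watchdog_jiffies now holds the remaining WD time; subsystem_maligp_suspend_response()
+ adds the tick count back to turn it into an absolute deadline again */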
+ /* reuse core->timer as the userspace response timeout handler */
+ _mali_osk_timer_add( core->timer, _mali_osk_time_mstoticks(1000) ); /* wait max 1 sec for userspace to respond */
+ return JOB_STATUS_CONTINUE_RUN; /* The core is NOT available for new jobs. */
+ }
+ /* hw watchdog reported a new hang, or an earlier progress-during-hang check found progress? */
+ else if ((CORE_HANG_CHECK_TIMEOUT == core->state) || (irq_readout & jobgp->active_mask & MALIGP2_REG_VAL_IRQ_HANG))
+ {
+ /* check interval in ms */
+ u32 timeout = mali_core_hang_check_timeout_get();
+ MALI_DEBUG_PRINT(3, ("Mali GP: HW/SW Watchdog triggered, checking for progress in %d ms\n", timeout));
+ core->state = CORE_WORKING;
+
+ /* save state for the progress checking */
+ jobgp->last_vscl = vscl;
+ jobgp->last_plbcl = plbcl;
+ if (jobgp->have_extended_progress_checking)
+ {
+ jobgp->vertices = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+ /* hw watchdog triggered, set up a progress checker every HANGCHECK ms */
+ _mali_osk_timer_add( core->timer_hang_detection, _mali_osk_time_mstoticks(timeout));
+ jobgp->active_mask &= ~MALIGP2_REG_VAL_IRQ_HANG; /* ignore the hw watchdog from now on */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+ return JOB_STATUS_CONTINUE_RUN; /* not finished */
+ }
+ /* no errors, but still working */
+ else if ( ( 0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ERROR)) &&
+ ( 0 != (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE ))
+ )
+ {
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+ return JOB_STATUS_CONTINUE_RUN;
+ }
+ /* Else there must be some error */
+ else
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ MALI_DEBUG_PRINT(1, ("Mali GP: Core crashed? *IRQ: 0x%x Status: 0x%x\n", irq_readout, core_status ));
+ #ifdef DEBUG
+ MALI_DEBUG_PRINT(1, ("Mali GP: Registers Before reset:\n"));
+ maligp_print_regs(1, core);
+ #endif
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+}
+
+
+/* This function is called from the ioctl function and should return a mali_core_job pointer
+to a created mali_core_job object with the data given from userspace */
+static _mali_osk_errcode_t subsystem_maligp_get_new_job_from_user(struct mali_core_session * session, void * argument)
+{
+ maligp_job *jobgp;
+ mali_core_job *job;
+ mali_core_job *previous_replaced_job;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_uk_gp_start_job_s * user_ptr_job_input;
+
+ user_ptr_job_input = (_mali_uk_gp_start_job_s *)argument;
+
+ MALI_CHECK_NON_NULL(jobgp = (maligp_job *) _mali_osk_calloc(1, sizeof(maligp_job)), _MALI_OSK_ERR_FAULT);
+
+ /* Copy the job data from the U/K interface */
+ if ( NULL == _mali_osk_memcpy(&jobgp->user_input, user_ptr_job_input, sizeof(_mali_uk_gp_start_job_s) ) )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not copy data from U/K interface.\n")) ;
+ err = _MALI_OSK_ERR_FAULT;
+ goto function_exit;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: subsystem_maligp_get_new_job_from_user 0x%x\n", (void*)jobgp->user_input.user_job_ptr));
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Job Regs: 0x%08X 0x%08X, 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ jobgp->user_input.frame_registers[0],
+ jobgp->user_input.frame_registers[1],
+ jobgp->user_input.frame_registers[2],
+ jobgp->user_input.frame_registers[3],
+ jobgp->user_input.frame_registers[4],
+ jobgp->user_input.frame_registers[5]) );
+
+
+ job = GET_JOB_EMBEDDED_PTR(jobgp);
+
+ job->session = session;
+ job_priority_set(job, jobgp->user_input.priority);
+ job_watchdog_set(job, jobgp->user_input.watchdog_msecs );
+ jobgp->heap_current_addr = jobgp->user_input.frame_registers[4];
+
+ job->abort_id = jobgp->user_input.abort_id;
+
+ jobgp->is_stalled_waiting_for_more_memory = 0;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ jobgp->pid = _mali_osk_get_pid();
+ jobgp->tid = _mali_osk_get_tid();
+#endif
+
+ if (NULL != session->job_waiting_to_run)
+ {
+ /* IF NOT( new job HAS HIGHER PRIORITY THAN waiting job) EXIT_NOT_START new job */
+ if(!job_has_higher_priority(job, session->job_waiting_to_run))
+ {
+ /* The job we try to add does NOT have higher pri than current */
+ /* Causes jobgp to be freed at function_exit: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ goto function_exit;
+ }
+ }
+
+ /* We now know that we have a job, and a slot to put it in */
+
+ jobgp->active_mask = MALIGP2_REG_VAL_IRQ_MASK_USED;
+
+ /* Allocating User Return Data */
+ jobgp->notification_obj = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_GP_FINISHED,
+ sizeof(_mali_uk_gp_job_finished_s) );
+
+ if ( NULL == jobgp->notification_obj )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not get notification_obj.\n")) ;
+ err = _MALI_OSK_ERR_NOMEM;
+ goto function_exit;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD( &(job->list) ) ;
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: Job: 0x%08x INPUT from user.\n", (u32)jobgp->user_input.user_job_ptr)) ;
+
+ /* This should not fail, since the priority was already checked above */
+ err = mali_core_session_add_job(session, job, &previous_replaced_job);
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Internal error\n")) ;
+ /* Causes jobgp to be freed at function_exit: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ _mali_osk_notification_delete( jobgp->notification_obj );
+ goto function_exit;
+ }
+
+ /* If MALI_TRUE: This session had a job with lower priority which was removed.
+ This replaced job is given back to userspace. */
+ if ( NULL != previous_replaced_job )
+ {
+ maligp_job *previous_replaced_jobgp;
+
+ previous_replaced_jobgp = GET_JOBGP2_PTR(previous_replaced_job);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: Replacing job: 0x%08x\n", (u32)previous_replaced_jobgp->user_input.user_job_ptr)) ;
+
+ /* Copy into the input data (which is also output data) the
+ pointer to the job that was replaced, so that the userspace
+ driver can put this job at the front of its job-queue */
+ user_ptr_job_input->returned_user_job_ptr = previous_replaced_jobgp->user_input.user_job_ptr;
+
+ /** @note failure to 'copy to user' at this point must not free jobgp,
+ * and so no transaction rollback required in the U/K interface */
+
+ /* This does not cause jobgp to be freed: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED;
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_get_new_job_from_user: Job added, prev returned\n")) ;
+ }
+ else
+ {
+ /* This does not cause jobgp to be freed: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED;
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_get_new_job_from_user: Job added\n")) ;
+ }
+
+function_exit:
+ if ( _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE == user_ptr_job_input->status
+ || _MALI_OSK_ERR_OK != err )
+ {
+ _mali_osk_free(jobgp);
+ }
+ MALI_ERROR(err);
+}
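+
+/* Editor's summary, derived from the function above: the status codes reported
+ back through user_ptr_job_input->status are
+ _MALI_UK_START_JOB_STARTED - job queued, nothing replaced
+ _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED - job queued, a lower-priority job handed back
+ _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE - job rejected, userspace must requeue it
+ Only the requeue and error paths free jobgp at function_exit. */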
+
+
+static _mali_osk_errcode_t subsystem_maligp_suspend_response(struct mali_core_session * session, void * argument)
+{
+ mali_core_renderunit *core;
+ maligp_job *jobgp;
+ mali_core_job *job;
+
+ _mali_uk_gp_suspend_response_s * suspend_response;
+
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_suspend_response\n"));
+
+ suspend_response = (_mali_uk_gp_suspend_response_s *)argument;
+
+ /* We read the response data from userspace */
+ /* On a single mali_gp system we can only have one Stalled GP,
+ and therefore one stalled request with a cookie. This checks
+ that we get the correct cookie */
+ if ( last_gp_core_cookie != (mali_core_renderunit *)suspend_response->cookie )
+ {
+ MALI_DEBUG_PRINT(2, ("Mali GP: Got an illegal cookie from Userspace.\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ core = (mali_core_renderunit *)suspend_response->cookie;
+ last_gp_core_cookie = NULL;
+ job = core->current_job;
+ jobgp = GET_JOBGP2_PTR(job);
+
+ switch( suspend_response->code )
+ {
+ case _MALIGP_JOB_RESUME_WITH_NEW_HEAP :
+ MALI_DEBUG_PRINT(5, ("MALIGP_JOB_RESUME_WITH_NEW_HEAP jiffies: %li\n", _mali_osk_time_tickcount()));
+ MALI_DEBUG_PRINT(4, ("New Heap addr 0x%08x - 0x%08x\n", suspend_response->arguments[0], suspend_response->arguments[1]));
+
+ jobgp->is_stalled_waiting_for_more_memory = 0;
+ job->watchdog_jiffies += _mali_osk_time_tickcount(); /* convert to absolute time again */
+ _mali_osk_timer_mod( core->timer, job->watchdog_jiffies); /* update the timer */
+
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, suspend_response->arguments[0]);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, suspend_response->arguments[1]);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0);
+#endif
+
+ MALI_DEBUG_PRINT(4, ("GP resumed with new heap\n"));
+
+ break;
+
+ case _MALIGP_JOB_ABORT:
+ MALI_DEBUG_PRINT(3, ("MALIGP_JOB_ABORT on heap extend request\n"));
+ _mali_osk_irq_schedulework( core->irq );
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Wrong Suspend response from userspace\n"));
+ }
+ MALI_SUCCESS;
+}
+
+/* This function is called from the ioctl function and should write the necessary data
+to userspace telling which job was finished and the status and debuginfo for this job.
+The function must also free and cleanup the input job object. */
+static void subsystem_maligp_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status )
+{
+ maligp_job *jobgp;
+ _mali_uk_gp_job_finished_s * job_out;
+ _mali_uk_gp_start_job_s* job_input;
+ mali_core_session *session;
+
+
+ jobgp = _MALI_OSK_CONTAINER_OF(job, maligp_job, embedded_core_job);
+ job_out = (_mali_uk_gp_job_finished_s *)jobgp->notification_obj->result_buffer; /* OK - this should've been malloc'd */
+ job_input= &(jobgp->user_input);
+ session = job->session;
+
+ MALI_DEBUG_PRINT(5, ("Mali GP: Job: 0x%08x OUTPUT to user. Runtime: %d ms, irq readout %x\n",
+ (u32)jobgp->user_input.user_job_ptr,
+ job->render_time_msecs,
+ jobgp->irq_status)) ;
+
+ _mali_osk_memset(job_out, 0 , sizeof(_mali_uk_gp_job_finished_s));
+
+ job_out->user_job_ptr = job_input->user_job_ptr;
+
+ switch( end_status )
+ {
+ case JOB_STATUS_CONTINUE_RUN:
+ case JOB_STATUS_END_SUCCESS:
+ case JOB_STATUS_END_OOM:
+ case JOB_STATUS_END_ABORT:
+ case JOB_STATUS_END_TIMEOUT_SW:
+ case JOB_STATUS_END_HANG:
+ case JOB_STATUS_END_SEG_FAULT:
+ case JOB_STATUS_END_ILLEGAL_JOB:
+ case JOB_STATUS_END_UNKNOWN_ERR:
+ case JOB_STATUS_END_SHUTDOWN:
+ case JOB_STATUS_END_SYSTEM_UNUSABLE:
+ job_out->status = (mali_subsystem_job_end_code) end_status;
+ break;
+ default:
+ job_out->status = JOB_STATUS_END_UNKNOWN_ERR ;
+ }
+
+ job_out->irq_status = jobgp->irq_status;
+ job_out->status_reg_on_stop = jobgp->status_reg_on_stop;
+ job_out->vscl_stop_addr = 0;
+ job_out->plbcl_stop_addr = 0;
+ job_out->heap_current_addr = jobgp->heap_current_addr;
+ job_out->perf_counter0 = jobgp->perf_counter0;
+ job_out->perf_counter1 = jobgp->perf_counter1;
+ job_out->perf_counter_src0 = jobgp->user_input.perf_counter_src0 ;
+ job_out->perf_counter_src1 = jobgp->user_input.perf_counter_src1 ;
+ job_out->render_time = job->render_time_msecs;
+#if defined(USING_MALI400_L2_CACHE)
+ job_out->perf_counter_l2_src0 = jobgp->perf_counter_l2_src0;
+ job_out->perf_counter_l2_src1 = jobgp->perf_counter_l2_src1;
+ job_out->perf_counter_l2_val0 = jobgp->perf_counter_l2_val0;
+ job_out->perf_counter_l2_val1 = jobgp->perf_counter_l2_val1;
+#endif
+
+ _mali_osk_notification_queue_send( session->notification_queue, jobgp->notification_obj);
+ jobgp->notification_obj = NULL;
+
+ _mali_osk_free(jobgp);
+
+ last_gp_core_cookie = NULL;
+}
+
+static void subsystem_maligp_renderunit_delete(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Mali GP: maligp_renderunit_delete\n"));
+ _mali_osk_free(core);
+}
+
+static void subsystem_maligp_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style)
+{
+ MALI_DEBUG_PRINT(5, ("Mali GP: renderunit_reset_core\n"));
+
+ switch (style)
+ {
+ case MALI_CORE_RESET_STYLE_RUNABLE:
+ maligp_reset(core);
+ break;
+ case MALI_CORE_RESET_STYLE_DISABLE:
+ maligp_raw_reset(core); /* do the raw reset */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* then disable the IRQs */
+ break;
+ case MALI_CORE_RESET_STYLE_HARD:
+ maligp_reset_hard(core);
+ maligp_initialize_registers_mgmt(core);
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("Unknown reset type %d\n", style));
+ break;
+ }
+}
+
+static void subsystem_maligp_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core , MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+ mali_core_renderunit_register_write(core , MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG );
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t subsystem_maligp_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core)
+{
+ u32 irq_readout;
+
+ irq_readout = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+
+ if ( MALIGP2_REG_VAL_IRQ_FORCE_HANG & irq_readout )
+ {
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
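+
+/* Editor's illustrative sketch, not part of the original patch: the IRQ probe
+ handshake the two functions above appear designed for. The calling order is
+ an assumption, not verified against the core subsystem. */
+#if 0
+static void example_probe_gp_irq_line(mali_core_renderunit *core)
+{
+ subsystem_maligp_renderunit_probe_core_irq_trigger(core); /* raise the FORCE_HANG irq via RAWSTAT */
+ /* ... wait for the interrupt to be delivered ... */
+ if (_MALI_OSK_ERR_OK == subsystem_maligp_renderunit_probe_core_irq_finished(core))
+ {
+ /* IRQ line verified: the forced hang was observed and cleared */
+ }
+}
+#endif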
+
+_mali_osk_errcode_t _mali_ukk_gp_start_job( _mali_uk_gp_start_job_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_start_job(session, args);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_number_of_cores_get(session, &args->number_of_cores);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_core_version_get(session, &args->version);
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_suspend_response(session, args);
+}
+
+void _mali_ukk_gp_abort_job( _mali_uk_gp_abort_job_s * args)
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ if (NULL == args->ctx) return;
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+ if (NULL == session) return;
+ mali_core_subsystem_ioctl_abort_job(session, args->abort_id);
+
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t maligp_signal_power_up( mali_bool queue_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali GP: signal power up core - queue_only: %d\n", queue_only ));
+
+ return( mali_core_subsystem_signal_power_up( &subsystem_maligp, 0, queue_only ) );
+}
+
+_mali_osk_errcode_t maligp_signal_power_down( mali_bool immediate_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali GP: signal power down core - immediate_only: %d\n", immediate_only ));
+
+ return( mali_core_subsystem_signal_power_down( &subsystem_maligp, 0, immediate_only ) );
+}
+
+#endif
+
+#if MALI_STATE_TRACKING
+void maligp_subsystem_dump_state(void)
+{
+ mali_core_renderunit_dump_state(&subsystem_maligp);
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c
new file mode 100644
index 00000000000..0ac49379bea
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_MALI200.c
@@ -0,0 +1,1187 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_core.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_rendercore.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#ifdef USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h" /* Needed for mali_kernel_mmu_force_bus_reset() */
+#endif
+
+#include "mali_osk_list.h"
+
+#if defined(USING_MALI200)
+#define MALI_PP_SUBSYSTEM_NAME "Mali200"
+#define MALI_PP_CORE_TYPE _MALI_200
+#elif defined(USING_MALI400)
+#define MALI_PP_SUBSYSTEM_NAME "Mali-400 PP"
+#define MALI_PP_CORE_TYPE _MALI_400_PP
+#else
+#error "No supported mali core defined"
+#endif
+
+#define GET_JOB_EMBEDDED_PTR(job) (&((job)->embedded_core_job))
+#define GET_JOB200_PTR(job_extern) _MALI_OSK_CONTAINER_OF(job_extern, mali200_job, embedded_core_job)
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_mali200_id = -1;
+
+/* Describing a mali200 job settings */
+typedef struct mali200_job
+{
+ /* The general job struct common for all mali cores */
+ mali_core_job embedded_core_job;
+ _mali_uk_pp_start_job_s user_input;
+
+ u32 irq_status;
+ u32 perf_counter0;
+ u32 perf_counter1;
+ u32 last_tile_list_addr; /* Necessary to continue a stopped job */
+
+ u32 active_mask;
+
+ /* The data we will return back to the user */
+ _mali_osk_notification_t *notification_obj;
+
+#if defined(USING_MALI400_L2_CACHE)
+ u32 perf_counter_l2_src0;
+ u32 perf_counter_l2_src1;
+ u32 perf_counter_l2_val0;
+ u32 perf_counter_l2_val1;
+ u32 perf_counter_l2_val0_raw;
+ u32 perf_counter_l2_val1_raw;
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ u32 pid;
+ u32 tid;
+#endif
+} mali200_job;
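+
+/* Editor's illustrative sketch, not part of the original patch: the
+ embedded-job pattern used throughout this file, built from the two macros
+ defined above. */
+#if 0
+static void example_embedded_job_roundtrip(mali200_job *job200)
+{
+ mali_core_job *job = GET_JOB_EMBEDDED_PTR(job200); /* &job200->embedded_core_job */
+ mali200_job *back = GET_JOB200_PTR(job); /* container_of back to the wrapper */
+ MALI_DEBUG_ASSERT(back == job200); /* the generic core code only ever sees the embedded struct */
+}
+#endif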
+
+
+/* Functions exposed to the general external system through
+ function pointers. */
+
+static _mali_osk_errcode_t mali200_subsystem_startup(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static _mali_osk_errcode_t mali200_subsystem_mmu_connect(mali_kernel_subsystem_identifier id);
+#endif
+static void mali200_subsystem_terminate(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t mali200_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+static void mali200_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+static _mali_osk_errcode_t mali200_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t mali200_renderunit_create(_mali_osk_resource_t * resource);
+#if USING_MMU
+static void mali200_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+#if MALI_STATE_TRACKING
+void mali200_subsystem_dump_state(void);
+#endif
+
+/* Internal support functions */
+static _mali_osk_errcode_t mali200_core_version_legal( mali_core_renderunit *core );
+static void mali200_reset(mali_core_renderunit *core);
+static void mali200_reset_hard(struct mali_core_renderunit * core);
+static void mali200_raw_reset(mali_core_renderunit * core);
+static void mali200_initialize_registers_mgmt(mali_core_renderunit *core );
+
+/* Functions exposed to the mali_core system through function pointers
+ in the subsystem struct. */
+static _mali_osk_errcode_t subsystem_mali200_start_job(mali_core_job * job, mali_core_renderunit * core);
+static _mali_osk_errcode_t subsystem_mali200_get_new_job_from_user(struct mali_core_session * session, void * argument);
+static void subsystem_mali200_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status);
+static void subsystem_mali200_renderunit_delete(mali_core_renderunit * core);
+static void subsystem_mali200_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style);
+static void subsystem_mali200_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_mali200_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core);
+
+static void subsystem_mali200_renderunit_stop_bus(struct mali_core_renderunit* core);
+static u32 subsystem_mali200_irq_handler_upper_half(struct mali_core_renderunit * core);
+static int subsystem_mali200_irq_handler_bottom_half(struct mali_core_renderunit* core);
+
+/* This will be one of the subsystems in the array of subsystems:
+ static struct mali_kernel_subsystem * subsystems[];
+ found in file: mali_kernel_core.c
+*/
+
+struct mali_kernel_subsystem mali_subsystem_mali200=
+{
+ mali200_subsystem_startup, /* startup */
+ mali200_subsystem_terminate, /* shutdown */
+#if USING_MMU
+ mali200_subsystem_mmu_connect, /* load_complete */
+#else
+ NULL,
+#endif
+ mali200_subsystem_core_system_info_fill, /* system_info_fill */
+ mali200_subsystem_session_begin, /* session_begin */
+ mali200_subsystem_session_end, /* session_end */
+#if USING_MMU
+ mali200_subsystem_broadcast_notification, /* broadcast_notification */
+#else
+ NULL,
+#endif
+#if MALI_STATE_TRACKING
+ mali200_subsystem_dump_state, /* dump_state */
+#endif
+} ;
+
+static mali_core_subsystem subsystem_mali200 ;
+
+static _mali_osk_errcode_t mali200_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem * subsystem;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_startup\n") ) ;
+
+ mali_subsystem_mali200_id = id;
+
+ /* All values get 0 as default */
+ _mali_osk_memset(&subsystem_mali200, 0, sizeof(subsystem_mali200));
+
+ subsystem = &subsystem_mali200;
+ subsystem->start_job = &subsystem_mali200_start_job;
+ subsystem->irq_handler_upper_half = &subsystem_mali200_irq_handler_upper_half;
+ subsystem->irq_handler_bottom_half = &subsystem_mali200_irq_handler_bottom_half;
+ subsystem->get_new_job_from_user = &subsystem_mali200_get_new_job_from_user;
+ subsystem->return_job_to_user = &subsystem_mali200_return_job_to_user;
+ subsystem->renderunit_delete = &subsystem_mali200_renderunit_delete;
+ subsystem->reset_core = &subsystem_mali200_renderunit_reset_core;
+ subsystem->stop_bus = &subsystem_mali200_renderunit_stop_bus;
+ subsystem->probe_core_irq_trigger = &subsystem_mali200_renderunit_probe_core_irq_trigger;
+ subsystem->probe_core_irq_acknowledge = &subsystem_mali200_renderunit_probe_core_irq_finished;
+
+ /* Setting variables in the general core part of the subsystem.*/
+ subsystem->name = MALI_PP_SUBSYSTEM_NAME;
+ subsystem->core_type = MALI_PP_CORE_TYPE;
+ subsystem->id = id;
+
+ /* Initiates the rest of the general core part of the subsystem */
+ MALI_CHECK_NO_ERROR(mali_core_subsystem_init( subsystem ));
+
+ /* This will register the function for adding MALI200 cores to the subsystem */
+#if defined(USING_MALI200)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI200, mali200_renderunit_create));
+#endif
+#if defined(USING_MALI400)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI400PP, mali200_renderunit_create));
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_startup\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+#if USING_MMU
+static _mali_osk_errcode_t mali200_subsystem_mmu_connect(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem_attach_mmu(&subsystem_mali200);
+ MALI_SUCCESS; /* OK */
+}
+#endif
+
+static void mali200_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_terminate\n") ) ;
+ mali_core_subsystem_cleanup(&subsystem_mali200);
+}
+
+static _mali_osk_errcode_t mali200_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ mali_core_session * session;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_session_begin\n") ) ;
+ MALI_CHECK_NON_NULL(session = _mali_osk_malloc( sizeof(mali_core_session) ), _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(session, 0, sizeof(*session) );
+ *slot = (mali_kernel_subsystem_session_slot)session;
+
+ session->subsystem = &subsystem_mali200;
+
+ session->notification_queue = queue;
+
+#if USING_MMU
+ session->mmu_session = mali_session_data;
+#endif
+
+ mali_core_session_begin(session);
+
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_session_begin\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+static void mali200_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ mali_core_session * session;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_session_end\n") ) ;
+ if ( NULL==slot || NULL==*slot)
+ {
+ MALI_PRINT_ERROR(("Input slot==NULL"));
+ return;
+ }
+ session = (mali_core_session*) *slot;
+ mali_core_session_close(session);
+
+ _mali_osk_free(session);
+ *slot = NULL;
+
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_session_end\n") ) ;
+}
+
+/**
+ * We fill in info about all the cores we have
+ * @param info Pointer to system info struct to update
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali200_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+ return mali_core_subsystem_system_info_fill(&subsystem_mali200, info);
+}
+
+
+static _mali_osk_errcode_t mali200_renderunit_create(_mali_osk_resource_t * resource)
+{
+ mali_core_renderunit *core;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: mali200_renderunit_create\n") ) ;
+ /* Checking that the resource settings are correct */
+#if defined(USING_MALI200)
+ if(MALI200 != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_PP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#elif defined(USING_MALI400)
+ if(MALI400PP != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_PP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+ if ( 0 != resource->size )
+ {
+ MALI_PRINT_ERROR(("Memory size set to " MALI_PP_SUBSYSTEM_NAME " core should be zero."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if ( NULL == resource->description )
+ {
+ MALI_PRINT_ERROR(("A " MALI_PP_SUBSYSTEM_NAME " core needs a unique description field"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Create a new core object */
+ core = (mali_core_renderunit*) _mali_osk_malloc(sizeof(*core));
+ if ( NULL == core )
+ {
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* Variables set to be able to open and register the core */
+ core->subsystem = &subsystem_mali200 ;
+ core->registers_base_addr = resource->base ;
+ core->size = MALI200_REG_SIZEOF_REGISTER_BANK ;
+ core->irq_nr = resource->irq ;
+ core->description = resource->description;
+#if USING_MMU
+ core->mmu_id = resource->mmu_id;
+ core->mmu = NULL;
+#endif
+#if USING_MALI_PMM
+ /* Set up core's PMM id */
+ switch( subsystem_mali200.number_of_cores )
+ {
+ case 0:
+ core->pmm_id = MALI_PMM_CORE_PP0;
+ break;
+ case 1:
+ core->pmm_id = MALI_PMM_CORE_PP1;
+ break;
+ case 2:
+ core->pmm_id = MALI_PMM_CORE_PP2;
+ break;
+ case 3:
+ core->pmm_id = MALI_PMM_CORE_PP3;
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("Unknown supported core for PMM\n"));
+ err = _MALI_OSK_ERR_FAULT;
+ goto exit_on_error0;
+ }
+#endif
+
+ err = mali_core_renderunit_init( core );
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to initialize renderunit\n"));
+ goto exit_on_error0;
+ }
+
+ /* Map the new core object, setting: core->registers_mapped */
+ err = mali_core_renderunit_map_registers(core);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to map register\n"));
+ goto exit_on_error1;
+ }
+
+ /* Check that the register mapping of the core works.
+ Return 0 if Mali PP core is present and accessible. */
+ if (mali_benchmark) {
+#if defined(USING_MALI200)
+ core->core_version = (((u32)MALI_PP_PRODUCT_ID) << 16) | 5 /* Fake Mali200-r0p5 */;
+#elif defined(USING_MALI400)
+ core->core_version = (((u32)MALI_PP_PRODUCT_ID) << 16) | 0x0101 /* Fake Mali400-r1p1 */;
+#else
+#error "No supported mali core defined"
+#endif
+ } else {
+ core->core_version = mali_core_renderunit_register_read(
+ core,
+ MALI200_REG_ADDR_MGMT_VERSION);
+ }
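+ /* core_version layout, as used here and in mali200_core_version_legal():
+ product ID in bits 31:16, rXpY revision in the low 16 bits */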
+
+ err = mali200_core_version_legal(core);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid core\n"));
+ goto exit_on_error2;
+ }
+
+ /* Reset the core. Put the core into a state where it can start to render. */
+ mali200_reset(core);
+
+ /* Registering IRQ, init the work_queue_irq_handle */
+ /* Adding this core as an available renderunit in the subsystem. */
+ err = mali_core_subsystem_register_renderunit(&subsystem_mali200, core);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to register with core\n"));
+ goto exit_on_error2;
+ }
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_renderunit_create\n") ) ;
+
+ MALI_SUCCESS;
+
+exit_on_error2:
+ mali_core_renderunit_unmap_registers(core);
+exit_on_error1:
+ mali_core_renderunit_term(core);
+exit_on_error0:
+ _mali_osk_free( core ) ;
+ MALI_PRINT_ERROR(("Renderunit NOT created."));
+ MALI_ERROR(err);
+}
+
+#if USING_MMU
+/* Used currently only for signalling when MMU has a pagefault */
+static void mali200_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+ mali_core_subsystem_broadcast_notification(&subsystem_mali200, message, data);
+}
+#endif
+
+static _mali_osk_errcode_t mali200_core_version_legal( mali_core_renderunit *core )
+{
+ u32 mali_type;
+
+ mali_type = core->core_version >> 16;
+#if defined(USING_MALI400)
+ /* Mali300 and Mali400 are compatible, accept either core. */
+ if (MALI400_PP_PRODUCT_ID != mali_type && MALI300_PP_PRODUCT_ID != mali_type)
+#else
+ if (MALI_PP_PRODUCT_ID != mali_type)
+#endif
+ {
+ MALI_PRINT_ERROR(("Error: reading this from " MALI_PP_SUBSYSTEM_NAME " version register: 0x%x\n", core->core_version));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali PP: core_version_legal: Reads correct mali version: %d\n", mali_type) ) ;
+ MALI_SUCCESS;
+}
+
+static void subsystem_mali200_renderunit_stop_bus(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+static void mali200_raw_reset( mali_core_renderunit *core )
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: mali200_raw_reset: %s\n", core->description));
+ if (mali_benchmark) return;
+
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable IRQs */
+
+#if defined(USING_MALI200)
+
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, request_loop_count == i, ("Mali PP: Bus was never stopped during core reset\n"));
+
+
+ if (request_loop_count==i)
+ {
+#if USING_MMU
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ /* Could not stop bus connections from the core, probably because one of the already pending
+ bus requests has had a page fault and therefore cannot complete before the MMU does page fault
+ handling. Treat this as a heavier reset - which unfortunately resets all
+ the cores on this MMU in addition to the MMU itself */
+ MALI_DEBUG_PRINT(1, ("Mali PP: Forcing Bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow PP to stop its bus, system failure, unable to recover\n"));
+ return;
+ }
+
+ /* use the hard reset routine to do the actual reset */
+ mali200_reset_hard(core);
+
+#elif defined(USING_MALI400)
+
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI400PP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count==i)
+ {
+#if USING_MMU
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ /* Could not stop bus connections from the core, probably because one of the already pending
+ bus requests has had a page fault and therefore cannot complete before the MMU does page fault
+ handling. Treat this as a heavier reset - which unfortunately resets all
+ the cores on this MMU in addition to the MMU itself */
+ MALI_DEBUG_PRINT(1, ("Mali PP: Forcing Bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow PP to stop its bus, system failure, unable to recover\n"));
+ return;
+ }
+ else
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+
+#else
+#error "no supported mali core defined"
+#endif
+}
+
+static void mali200_reset( mali_core_renderunit *core )
+{
+ if (!mali_benchmark) {
+ mali200_raw_reset(core);
+ mali200_initialize_registers_mgmt(core);
+ }
+}
+
+/* Sets the registers on mali200 according to the const default_mgmt_regs array. */
+static void mali200_initialize_registers_mgmt(mali_core_renderunit *core )
+{
+ MALI_DEBUG_PRINT(6, ("Mali PP: mali200_initialize_registers_mgmt: %s\n", core->description)) ;
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+/* Start this job on this core. Return MALI_TRUE if the job was started. */
+static _mali_osk_errcode_t subsystem_mali200_start_job(mali_core_job * job, mali_core_renderunit * core)
+{
+ mali200_job *job200;
+
+ /* The local extended version of the general structs */
+ job200 = _MALI_OSK_CONTAINER_OF(job, mali200_job, embedded_core_job);
+
+ if ( (0 == job200->user_input.frame_registers[0]) ||
+ (0 == job200->user_input.frame_registers[1]) )
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x WILL NOT START SINCE JOB HAS ILLEGAL ADDRESSES\n",
+ (u32)job200->user_input.user_job_ptr));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x START_RENDER Tile_list: 0x%08x\n",
+ (u32)job200->user_input.user_job_ptr,
+ job200->user_input.frame_registers[0]));
+ MALI_DEBUG_PRINT(6, ("Mali PP: RSW base addr: 0x%08x Vertex base addr: 0x%08x\n",
+ job200->user_input.frame_registers[1], job200->user_input.frame_registers[2]));
+
+ /* Frame registers. Copy from mem to physical registers */
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_FRAME,
+ &(job200->user_input.frame_registers[0]),
+ MALI200_NUM_REGS_FRAME);
+
+ /* Write Back unit 0. Copy from mem to physical registers*/
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_WB0,
+ &(job200->user_input.wb0_registers[0]),
+ MALI200_NUM_REGS_WBx);
+
+ /* Write Back unit 1. Copy from mem to physical registers */
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_WB1,
+ &(job200->user_input.wb1_registers[0]),
+ MALI200_NUM_REGS_WBx);
+
+ /* Write Back unit 2. Copy from mem to physical registers */
+ mali_core_renderunit_register_write_array(
+ core,
+ MALI200_REG_ADDR_WB2,
+ &(job200->user_input.wb2_registers[0]),
+ MALI200_NUM_REGS_WBx);
+
+
+ /* This selects which performance counters we are reading */
+ if ( 0 != job200->user_input.perf_counter_flag )
+ {
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE,
+ MALI200_REG_VAL_PERF_CNT_ENABLE);
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC,
+ job200->user_input.perf_counter_src0);
+
+ }
+
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+ {
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+ MALI200_REG_VAL_PERF_CNT_ENABLE);
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+ job200->user_input.perf_counter_src1);
+
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if ( job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ int force_reset = ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET ) ? 1 : 0;
+ u32 src0 = 0;
+ u32 src1 = 0;
+
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE )
+ {
+ src0 = job200->user_input.perf_counter_l2_src0;
+ }
+ if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE )
+ {
+ src1 = job200->user_input.perf_counter_l2_src1;
+ }
+
+ mali_kernel_l2_cache_set_perf_counters(src0, src1, force_reset); /* will activate and possibly reset counters */
+
+ /* Now, retrieve the current values, so we can subtract them when the job has completed */
+ mali_kernel_l2_cache_get_perf_counters(&job200->perf_counter_l2_src0,
+ &job200->perf_counter_l2_val0,
+ &job200->perf_counter_l2_src1,
+ &job200->perf_counter_l2_val1);
+ }
+#endif
+ }
+
+ subsystem_flush_mapped_mem_cache();
+ _mali_osk_mem_barrier();
+
+ /* This is the command that starts the Core */
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT,
+ MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), job200->pid, job200->tid, 0, 0, 0);
+#endif
+
+ MALI_SUCCESS;
+}
+
+static u32 subsystem_mali200_irq_handler_upper_half(mali_core_renderunit * core)
+{
+ u32 irq_readout;
+
+ if (mali_benchmark) {
+ return (core->current_job ? 1 : 0); /* simulate a pending IRQ when a job is running */
+ }
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: subsystem_mali200_irq_handler_upper_half: %s\n", core->description)) ;
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+
+ if ( MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout )
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+ return 1;
+ }
+ return 0;
+}
+
+static int subsystem_mali200_irq_handler_bottom_half(struct mali_core_renderunit* core)
+{
+ u32 irq_readout;
+ u32 current_tile_addr;
+ u32 core_status;
+ mali_core_job * job;
+ mali200_job * job200;
+
+ job = core->current_job;
+ job200 = GET_JOB200_PTR(job);
+
+
+ if (mali_benchmark) {
+ irq_readout = MALI200_REG_VAL_IRQ_END_OF_FRAME;
+ current_tile_addr = 0;
+ core_status = 0;
+ } else {
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+ current_tile_addr = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR);
+ core_status = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_STATUS);
+ }
+
+ if (NULL == job)
+ {
+ MALI_DEBUG_ASSERT(CORE_IDLE==core->state);
+ if ( 0 != irq_readout )
+ {
+ MALI_PRINT_ERROR(("Interrupt from a core not running a job. IRQ: 0x%04x Status: 0x%04x", irq_readout, core_status));
+ }
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ MALI_DEBUG_ASSERT(CORE_IDLE!=core->state);
+
+ job200->irq_status |= irq_readout;
+
+ MALI_DEBUG_PRINT_IF( 3, ( 0 != irq_readout ),
+ ("Mali PP: Job: 0x%08x IRQ RECEIVED Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x\n",
+ (u32)job200->user_input.user_job_ptr, irq_readout ,current_tile_addr ,core_status));
+
+ if ( MALI200_REG_VAL_IRQ_END_OF_FRAME & irq_readout)
+ {
+#if defined(USING_MALI200)
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES);
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status */
+#endif
+
+ if (0 != job200->user_input.perf_counter_flag )
+ {
+ if (job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+ job200->perf_counter0 = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ job200->perf_counter1 = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
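+ /* as in the GP bottom half: u32 subtraction gives the right delta even
+ across a single counter wrap */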
+
+ if (job200->perf_counter_l2_src0 == src0)
+ {
+ job200->perf_counter_l2_val0_raw = val0;
+ job200->perf_counter_l2_val0 = val0 - job200->perf_counter_l2_val0;
+ }
+ else
+ {
+ job200->perf_counter_l2_val0_raw = 0;
+ job200->perf_counter_l2_val0 = 0;
+ }
+
+ if (job200->perf_counter_l2_src1 == src1)
+ {
+ job200->perf_counter_l2_val1_raw = val1;
+ job200->perf_counter_l2_val1 = val1 - job200->perf_counter_l2_val1;
+ }
+ else
+ {
+ job200->perf_counter_l2_val1_raw = 0;
+ job200->perf_counter_l2_val1 = 0;
+ }
+ }
+#endif
+
+ }
+
+ return JOB_STATUS_END_SUCCESS; /* reschedule */
+ }
+ /* Overall SW watchdog timeout, or time to do hang checking and no progress detected? */
+ else if (
+ (CORE_WATCHDOG_TIMEOUT == core->state) ||
+ ((CORE_HANG_CHECK_TIMEOUT == core->state) && (current_tile_addr == job200->last_tile_list_addr))
+ )
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status */
+#endif
+ /* no progress detected, killed by the watchdog */
+ MALI_DEBUG_PRINT(2, ("M200: SW-Timeout Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x.\n", irq_readout ,current_tile_addr ,core_status) );
+ /* In this case the surrounding system will clean up and reset the core */
+ return JOB_STATUS_END_HANG;
+ }
+ /* HW watchdog triggered or an existing hang check passed? */
+ else if ((CORE_HANG_CHECK_TIMEOUT == core->state) || (irq_readout & job200->active_mask & MALI200_REG_VAL_IRQ_HANG))
+ {
+ /* check interval in ms */
+ u32 timeout = mali_core_hang_check_timeout_get();
+ MALI_DEBUG_PRINT(3, ("M200: HW/SW Watchdog triggered, checking for progress in %d ms\n", timeout));
+ job200->last_tile_list_addr = current_tile_addr;
+ /* hw watchdog triggered, set up a progress checker every HANGCHECK ms */
+ _mali_osk_timer_add(core->timer_hang_detection, _mali_osk_time_mstoticks(timeout));
+ job200->active_mask &= ~MALI200_REG_VAL_IRQ_HANG; /* ignore the hw watchdog from now on */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, irq_readout & ~MALI200_REG_VAL_IRQ_HANG);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, job200->active_mask);
+ return JOB_STATUS_CONTINUE_RUN; /* not finished */
+ }
+ /* No irq pending, core still busy */
+ else if ((0 == (irq_readout & MALI200_REG_VAL_IRQ_MASK_USED)) && ( 0 != (core_status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)))
+ {
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, job200->active_mask);
+ return JOB_STATUS_CONTINUE_RUN; /* Not finished */
+ }
+ else
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status */
+#endif
+
+ MALI_DEBUG_PRINT(1, ("Mali PP: Job: 0x%08x CRASH? Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x\n",
+ (u32)job200->user_input.user_job_ptr, irq_readout ,current_tile_addr ,core_status) ) ;
+
+ if (irq_readout & MALI200_REG_VAL_IRQ_BUS_ERROR)
+ {
+ u32 bus_error = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS);
+
+ MALI_DEBUG_PRINT(1, ("Bus error status: 0x%08X\n", bus_error));
+ MALI_DEBUG_PRINT_IF(1, (bus_error & 0x01), ("Bus write error from id 0x%02x\n", (bus_error>>2) & 0x0F));
+ MALI_DEBUG_PRINT_IF(1, (bus_error & 0x02), ("Bus read error from id 0x%02x\n", (bus_error>>6) & 0x0F));
+ MALI_DEBUG_PRINT_IF(1, (0 == (bus_error & 0x03)), ("Bus error but neither read nor write was set as the error reason\n"));
+ (void)bus_error;
+ }
+
+ return JOB_STATUS_END_UNKNOWN_ERR; /* reschedule */
+ }
+}
+
+
+/* This function is called from the ioctl function and should return a mali_core_job pointer
+to a created mali_core_job object with the data given from userspace */
+static _mali_osk_errcode_t subsystem_mali200_get_new_job_from_user(struct mali_core_session * session, void * argument)
+{
+ mali200_job *job200;
+ mali_core_job *job;
+ mali_core_job *previous_replaced_job;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_uk_pp_start_job_s * user_ptr_job_input;
+
+ user_ptr_job_input = (_mali_uk_pp_start_job_s *)argument;
+
+ MALI_CHECK_NON_NULL(job200 = (mali200_job *) _mali_osk_malloc(sizeof(mali200_job)), _MALI_OSK_ERR_NOMEM);
+ _mali_osk_memset(job200, 0 , sizeof(mali200_job) );
+
+ /* We read the job data from the userspace pointer */
+ if ( NULL == _mali_osk_memcpy((void*)&job200->user_input, user_ptr_job_input, sizeof(job200->user_input)) )
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Could not copy data from U/K interface.\n")) ;
+ err = _MALI_OSK_ERR_FAULT;
+ goto function_exit;
+ }
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: subsystem_mali200_get_new_job_from_user 0x%x\n", (void*)job200->user_input.user_job_ptr));
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: Frameregs: 0x%x 0x%x 0x%x Writeback[1] 0x%x, Pri:%d; Watchd:%d\n",
+ job200->user_input.frame_registers[0], job200->user_input.frame_registers[1], job200->user_input.frame_registers[2],
+ job200->user_input.wb0_registers[1], job200->user_input.priority,
+ job200->user_input.watchdog_msecs));
+
+ if ( job200->user_input.perf_counter_flag)
+ {
+#if defined(USING_MALI400_L2_CACHE)
+ MALI_DEBUG_PRINT(5, ("Mali PP: Performance counters: flag:0x%x src0:0x%x src1:0x%x l2_src0:0x%x l2_src1:0x%x\n",
+ job200->user_input.perf_counter_flag,
+ job200->user_input.perf_counter_src0,
+ job200->user_input.perf_counter_src1,
+ job200->user_input.perf_counter_l2_src0,
+ job200->user_input.perf_counter_l2_src1));
+#else
+ MALI_DEBUG_PRINT(5, ("Mali PP: Performance counters: flag:0x%x src0:0x%x src1:0x%x\n",
+ job200->user_input.perf_counter_flag,
+ job200->user_input.perf_counter_src0,
+ job200->user_input.perf_counter_src1));
+#endif
+ }
+
+ job = GET_JOB_EMBEDDED_PTR(job200);
+
+ job->session = session;
+ job_priority_set(job, job200->user_input.priority);
+ job_watchdog_set(job, job200->user_input.watchdog_msecs );
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ job200->pid = _mali_osk_get_pid();
+ job200->tid = _mali_osk_get_tid();
+#endif
+
+ job->abort_id = job200->user_input.abort_id;
+ if (NULL != session->job_waiting_to_run)
+ {
+ /* If the new job does NOT have higher priority than the waiting job, exit without starting it */
+ if(!job_has_higher_priority(job, session->job_waiting_to_run))
+ {
+ /* The job we try to add does NOT have higher pri than current */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ goto function_exit;
+ }
+ }
+
+ /* We now know that we have a job, and an empty session slot to put it in */
+
+ job200->active_mask = MALI200_REG_VAL_IRQ_MASK_USED;
+
+ /* Allocating User Return Data */
+ job200->notification_obj = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_PP_FINISHED,
+ sizeof(_mali_uk_pp_job_finished_s) );
+
+ if ( NULL == job200->notification_obj )
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Could not get notification_obj.\n")) ;
+ err = _MALI_OSK_ERR_NOMEM;
+ goto function_exit;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD( &(job->list) ) ;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x INPUT from user.\n", (u32)job200->user_input.user_job_ptr)) ;
+
+ /* This should not happen since we checked the priority above */
+ if ( _MALI_OSK_ERR_OK != mali_core_session_add_job(session, job, &previous_replaced_job))
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Internal error\n")) ;
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ _mali_osk_notification_delete( job200->notification_obj );
+ goto function_exit;
+ }
+
+ /* If MALI_TRUE: This session had a job with lower priority which was removed.
+ This replaced job is given back to userspace. */
+ if ( NULL != previous_replaced_job )
+ {
+ mali200_job *previous_replaced_job200;
+
+ previous_replaced_job200 = GET_JOB200_PTR(previous_replaced_job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Replacing job: 0x%08x\n", (u32)previous_replaced_job200->user_input.user_job_ptr)) ;
+
+ /* Copy into the input data (which also is output data) the
+ pointer to the job that was replaced, so that the userspace
+ driver can put this job at the front of its job-queue */
+
+ user_ptr_job_input->returned_user_job_ptr = previous_replaced_job200->user_input.user_job_ptr;
+
+ /** @note failure to 'copy to user' at this point must not free job200,
+ * and so no transaction rollback required in the U/K interface */
+
+ /* This does not cause job200 to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED;
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_get_new_job_from_user: Job added, prev returned\n")) ;
+ }
+ else
+ {
+ /* This does not cause job200 to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED;
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_get_new_job_from_user: Job added\n")) ;
+ }
+
+function_exit:
+ if (_MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE == user_ptr_job_input->status
+ || _MALI_OSK_ERR_OK != err )
+ {
+ _mali_osk_free(job200);
+ }
+ MALI_ERROR(err);
+}
+
+/* This function is called from the ioctl function and should write the necessary data
+to userspace telling which job was finished and the status and debuginfo for this job.
+The function must also free and cleanup the input job object. */
+static void subsystem_mali200_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status)
+{
+ mali200_job *job200;
+ _mali_uk_pp_job_finished_s * job_out;
+ _mali_uk_pp_start_job_s * job_input;
+ mali_core_session *session;
+
+ if (NULL == job)
+ {
+ MALI_DEBUG_PRINT(1, ("subsystem_mali200_return_job_to_user received a NULL ptr\n"));
+ return;
+ }
+
+ job200 = _MALI_OSK_CONTAINER_OF(job, mali200_job, embedded_core_job);
+
+ if (NULL == job200->notification_obj)
+ {
+ MALI_DEBUG_PRINT(1, ("Found job200 with NULL notification object, abandoning userspace sending\n"));
+ return;
+ }
+
+ job_out = job200->notification_obj->result_buffer;
+ job_input= &(job200->user_input);
+ session = job->session;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x OUTPUT to user. Runtime: %dms\n",
+ (u32)job200->user_input.user_job_ptr,
+ job->render_time_msecs)) ;
+
+ _mali_osk_memset(job_out, 0 , sizeof(_mali_uk_pp_job_finished_s));
+
+ job_out->user_job_ptr = job_input->user_job_ptr;
+
+ switch( end_status )
+ {
+ case JOB_STATUS_CONTINUE_RUN:
+ case JOB_STATUS_END_SUCCESS:
+ case JOB_STATUS_END_OOM:
+ case JOB_STATUS_END_ABORT:
+ case JOB_STATUS_END_TIMEOUT_SW:
+ case JOB_STATUS_END_HANG:
+ case JOB_STATUS_END_SEG_FAULT:
+ case JOB_STATUS_END_ILLEGAL_JOB:
+ case JOB_STATUS_END_UNKNOWN_ERR:
+ case JOB_STATUS_END_SHUTDOWN:
+ case JOB_STATUS_END_SYSTEM_UNUSABLE:
+ job_out->status = (mali_subsystem_job_end_code) end_status;
+ break;
+
+ default:
+ job_out->status = JOB_STATUS_END_UNKNOWN_ERR ;
+ }
+ job_out->irq_status = job200->irq_status;
+ job_out->perf_counter0 = job200->perf_counter0;
+ job_out->perf_counter1 = job200->perf_counter1;
+ job_out->render_time = job->render_time_msecs;
+
+#if defined(USING_MALI400_L2_CACHE)
+ job_out->perf_counter_l2_src0 = job200->perf_counter_l2_src0;
+ job_out->perf_counter_l2_src1 = job200->perf_counter_l2_src1;
+ job_out->perf_counter_l2_val0 = job200->perf_counter_l2_val0;
+ job_out->perf_counter_l2_val1 = job200->perf_counter_l2_val1;
+ job_out->perf_counter_l2_val0_raw = job200->perf_counter_l2_val0_raw;
+ job_out->perf_counter_l2_val1_raw = job200->perf_counter_l2_val1_raw;
+#endif
+
+ _mali_osk_notification_queue_send( session->notification_queue, job200->notification_obj);
+ job200->notification_obj = NULL;
+
+ _mali_osk_free(job200);
+}
+
+static void subsystem_mali200_renderunit_delete(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP: mali200_renderunit_delete\n"));
+ _mali_osk_free(core);
+}
+
+static void mali200_reset_hard(struct mali_core_renderunit * core)
+{
+ const int reset_finished_loop_count = 15;
+ const u32 reset_wait_target_register = MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_renderunit_reset_core_hard called for core %s\n", core->description));
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_invalid_value);
+
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT,
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+ for (i = 0; i < reset_finished_loop_count; i++)
+ {
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_core_renderunit_register_read(core, reset_wait_target_register))
+ {
+ MALI_DEBUG_PRINT(5, ("Reset loop exiting after %d iterations\n", i));
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (i == reset_finished_loop_count)
+ {
+ MALI_DEBUG_PRINT(1, ("The reset loop didn't work\n"));
+ }
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+}
+
+static void subsystem_mali200_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP: renderunit_reset_core\n"));
+
+ switch (style)
+ {
+ case MALI_CORE_RESET_STYLE_RUNABLE:
+ mali200_reset(core);
+ break;
+ case MALI_CORE_RESET_STYLE_DISABLE:
+ mali200_raw_reset(core); /* do the raw reset */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* then disable the IRQs */
+ break;
+ case MALI_CORE_RESET_STYLE_HARD:
+ mali200_reset_hard(core);
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("Unknown reset type %d\n", style));
+ }
+}
+
+static void subsystem_mali200_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t subsystem_mali200_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core)
+{
+ u32 irq_readout;
+
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+
+ if ( MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout )
+ {
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job( _mali_uk_pp_start_job_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_start_job(session, args);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_number_of_cores_get(session, &args->number_of_cores);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_core_version_get(session, &args->version);
+}
+
+void _mali_ukk_pp_abort_job( _mali_uk_pp_abort_job_s * args)
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ if (NULL == args->ctx) return;
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ if (NULL == session) return;
+ mali_core_subsystem_ioctl_abort_job(session, args->abort_id);
+
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t malipp_signal_power_up( u32 core_num, mali_bool queue_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali PP: signal power up core: %d - queue_only: %d\n", core_num, queue_only ));
+
+ return( mali_core_subsystem_signal_power_up( &subsystem_mali200, core_num, queue_only ) );
+}
+
+_mali_osk_errcode_t malipp_signal_power_down( u32 core_num, mali_bool immediate_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali PP: signal power down core: %d - immediate_only: %d\n", core_num, immediate_only ));
+
+ return( mali_core_subsystem_signal_power_down( &subsystem_mali200, core_num, immediate_only ) );
+}
+
+#endif
+
+#if MALI_STATE_TRACKING
+void mali200_subsystem_dump_state(void)
+{
+ mali_core_renderunit_dump_state(&subsystem_mali200);
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h
new file mode 100644
index 00000000000..08516c56a9f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_common.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+ #if defined(_DEBUG)
+ #define DEBUG
+ #endif
+#endif
+
+/* This file includes several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...) Do not use this function: it is included in Release builds as well.
+ * - MALI_DEBUG_PRINT(nr, (X)) Prints the second argument if nr <= the runtime debug level.
+ * - MALI_DEBUG_PRINT_ERROR( (X) ) Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_PRINT_ASSERT(exp, (X)) If the asserted expression is false, prints the message and breaks.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a NULL pointer.
+ * - MALI_DEBUG_CODE( X ) The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra set of parentheses around the argument list.
+ *
+ * The printf function MALI_PRINTF(...) is routed to _mali_osk_dbgmsg.
+ *
+ * The suggested range for the debug level is [1:6], where
+ * [1:2] are messages with the highest priority, indicating possible errors,
+ * [3:4] are messages with medium priority, outputting important variables, and
+ * [5:6] are messages with low priority, used during extensive debugging.
+ */
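+
+/* A minimal usage sketch (illustrative only; "core" is a hypothetical local
+ * variable, not a name defined in this file):
+ *
+ *   MALI_DEBUG_ASSERT_POINTER(core);
+ *   MALI_DEBUG_PRINT(3, ("Mali core: %s initialized\n", core->description));
+ *   MALI_DEBUG_CODE( int dump_count = 0; )
+ *
+ * Note the extra set of parentheses around the printf-style argument list:
+ * the macros pass it on as a single argument to MALI_PRINTF.
+ */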
+
+ /**
+ * Fundamental error macro. Reports an error code. This is abstracted to allow us to
+ * easily switch to a different error reporting method if we want, and also to allow
+ * us to search for error returns easily.
+ *
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+ */
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given is anything other than _MALI_OSK_NO_ERROR,
+ * then the value is returned from the enclosing function as an error code. This effectively
+ * acts as a guard clause, and propagates error values up the call stack. This uses a
+ * temporary value to ensure that the error expression is not evaluated twice.
+ * If the counter for forcing a failure has been set using _mali_force_error, this error will be
+ * returned without evaluating the expression in MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+ do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+ if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+ MALI_ERROR(_check_no_error_result); \
+ } while(0)
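+
+/* As a sketch of the guard-clause pattern this enables (the two called
+ * functions are hypothetical):
+ *
+ *   static _mali_osk_errcode_t setup_core(void)
+ *   {
+ *       MALI_CHECK_NO_ERROR( map_core_registers() );
+ *       MALI_CHECK_NO_ERROR( request_core_irq() );
+ *       MALI_SUCCESS;
+ *   }
+ *
+ * Any failing call returns its error code from setup_core immediately.
+ */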
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto. The label must therefore be local to the function in
+ * which this macro appears. This is most usually used to execute some clean-up code before
+ * exiting with a call to ERROR.
+ *
+ * Like the other macros, this is a macro to allow us to override the condition if we wish,
+ * e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
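+
+/* A typical clean-up-before-exit shape, sketched with illustrative names
+ * (buffer, size and setup are made up for the example):
+ *
+ *   buffer = _mali_osk_malloc(size);
+ *   MALI_CHECK_GOTO(NULL != buffer, failed);
+ *   MALI_CHECK_GOTO(0 == setup(buffer), failed_cleanup);
+ *   MALI_SUCCESS;
+ * failed_cleanup:
+ *   _mali_osk_free(buffer);
+ * failed:
+ *   MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ */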
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+
+#define MALI_PRINT_ERROR(args) do{ \
+ MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+ MALI_PRINTF((" %s()%4d\n ", __FUNCTION__, __LINE__)) ; \
+ MALI_PRINTF(args); \
+ MALI_PRINTF(("\n")); \
+ } while(0)
+
+#define MALI_PRINT(args) do{ \
+ MALI_PRINTF(("Mali: ")); \
+ MALI_PRINTF(args); \
+ } while (0)
+
+#ifdef DEBUG
+extern int mali_debug_level;
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+ if((level) <= mali_debug_level)\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+ } while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args) \
+ if((condition)&&((level) <= mali_debug_level))\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+ else if((level) <= mali_debug_level)\
+ { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
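+
+/* For example, a u/k entry point taking a raw userspace pointer would be
+ * declared along these lines (a sketch; the function is hypothetical):
+ *
+ *   _mali_osk_errcode_t copy_job_args(void __user *uarg);
+ *
+ * so that tools which understand the annotation can flag any direct
+ * dereference of uarg inside kernel code.
+ */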
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c
new file mode 100644
index 00000000000..a40808bd2d8
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.c
@@ -0,0 +1,892 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_gp.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_rendercore.h"
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MALI_PMM
+#include "mali_pmm.h"
+#endif /* USING_MALI_PMM */
+
+/* platform specific set up */
+#include "mali_platform.h"
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_core_id = -1;
+
+/** Pointer to table of resource definitions available to the Mali driver.
+ * _mali_osk_resources_init() sets up the pointer to this table.
+ */
+static _mali_osk_resource_t *arch_configuration = NULL;
+
+/** Number of resources initialized by _mali_osk_resources_init() */
+static u32 num_resources;
+
+static _mali_osk_errcode_t register_resources( _mali_osk_resource_t **arch_configuration, u32 num_resources );
+
+static _mali_osk_errcode_t initialize_subsystems(void);
+static void terminate_subsystems(void);
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_setup(mali_kernel_subsystem_identifier id);
+static void mali_kernel_subsystem_core_cleanup(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t mali_kernel_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t mali_kernel_subsystem_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+static _mali_osk_errcode_t build_system_info(void);
+
+/**
+ * @brief handler for MEM_VALIDATION resources
+ *
+ * This resource handler is common to all memory systems. It provides a default
+ * means for validating requests to map in external memory via
+ * _mali_ukk_map_external_mem. In addition, if _mali_ukk_va_to_pa is
+ * implemented, then _mali_ukk_va_to_pa can make use of this MEM_VALIDATION
+ * resource.
+ *
+ * MEM_VALIDATION also provides a CPU physical to Mali physical address
+ * translation, for use by _mali_ukk_map_external_mem.
+ *
+ * @note MEM_VALIDATION resources are only to handle simple cases where a
+ * certain physical address range is allowed to be mapped in by any process,
+ * e.g. a framebuffer at a fixed location. If the implementor has more complex
+ * mapping requirements, then they must either:
+ * - implement their own memory validation function
+ * - or, integrate with UMP.
+ *
+ * @param resource The resource to handle (type MEM_VALIDATION)
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+static _mali_osk_errcode_t mali_kernel_core_resource_mem_validation(_mali_osk_resource_t * resource);
+
+/* MEM_VALIDATION handler state */
+typedef struct
+{
+ u32 phys_base; /**< Mali physical base of the memory, page aligned */
+ u32 size; /**< size in bytes of the memory, multiple of page size */
+ s32 cpu_usage_adjust; /**< Offset to add to Mali Physical address to obtain CPU physical address */
+} _mali_mem_validation_t;
+
+#define INVALID_MEM 0xffffffff
+
+static _mali_mem_validation_t mem_validator = { INVALID_MEM, INVALID_MEM, -1 };
+
+static struct mali_kernel_subsystem mali_subsystem_core =
+{
+ mali_kernel_subsystem_core_setup, /* startup */
+ mali_kernel_subsystem_core_cleanup, /* shutdown */
+ NULL, /* load_complete */
+ mali_kernel_subsystem_core_system_info_fill, /* system_info_fill */
+ mali_kernel_subsystem_core_session_begin, /* session_begin */
+ NULL, /* session_end */
+ NULL, /* broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+};
+
+static struct mali_kernel_subsystem * subsystems[] =
+{
+ /* always initialize the hw subsystems first */
+ /* always included */
+ &mali_subsystem_memory,
+
+#if USING_MALI_PMM
+ /* The PMM must be initialized before any cores - including L2 cache */
+ &mali_subsystem_pmm,
+#endif
+
+ /* The rendercore subsystem must be initialized before any subsystem based on the
+ * rendercores is started e.g. mali_subsystem_mali200 and mali_subsystem_gp2 */
+ &mali_subsystem_rendercore,
+
+ /* the Mali PP (mali200) job subsystem */
+ &mali_subsystem_mali200,
+
+ /* the Mali GP (gp2) job subsystem */
+ &mali_subsystem_gp2,
+
+#if defined USING_MALI400_L2_CACHE
+ &mali_subsystem_l2_cache,
+#endif
+
+ /* always included */
+ /* NOTE Keep the core entry at the tail of the list */
+ &mali_subsystem_core
+};
+
+#define SUBSYSTEMS_COUNT ( sizeof(subsystems) / sizeof(subsystems[0]) )
+
+/* Pointers to this type are available as an incomplete struct in mali_kernel_session_manager.h */
+struct mali_session_data
+{
+ void * subsystem_data[SUBSYSTEMS_COUNT];
+ _mali_osk_notification_queue_t * ioctl_queue;
+};
+
+static mali_kernel_resource_registrator resource_handler[RESOURCE_TYPE_COUNT] = { NULL, };
+
+/* system info variables */
+static _mali_osk_lock_t *system_info_lock = NULL;
+static _mali_system_info * system_info = NULL;
+static u32 system_info_size = 0;
+
+/* is called from OS specific driver entry point */
+_mali_osk_errcode_t mali_kernel_constructor( void )
+{
+ _mali_osk_errcode_t err;
+
+ err = mali_platform_init(NULL);
+ if (_MALI_OSK_ERR_OK != err) goto error1;
+
+ err = _mali_osk_init();
+ if (_MALI_OSK_ERR_OK != err) goto error2;
+
+ MALI_DEBUG_PRINT(2, ("\n"));
+ MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n",_MALI_API_VERSION));
+ MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
+ MALI_DEBUG_PRINT(2, ("Svn revision: %s\n", SVN_REV_STRING));
+
+ err = initialize_subsystems();
+ if (_MALI_OSK_ERR_OK != err) goto error3;
+
+ MALI_PRINT(("Mali device driver %s loaded\n", SVN_REV_STRING));
+
+ MALI_SUCCESS;
+
+error3:
+ MALI_PRINT(("Mali subsystems failed\n"));
+ _mali_osk_term();
+ goto deinit_platform;
+error2:
+ MALI_PRINT(("Mali OSK init failed\n"));
+deinit_platform:
+ if (_MALI_OSK_ERR_OK != mali_platform_deinit(NULL))
+ {
+ MALI_PRINT(("Failed to deinit platform\n"));
+ }
+ MALI_ERROR(err);
+error1:
+ MALI_PRINT(("Failed to init platform\n"));
+ MALI_ERROR(err);
+}
+
+/* is called from OS specific driver exit point */
+void mali_kernel_destructor( void )
+{
+ MALI_DEBUG_PRINT(2, ("\n"));
+ MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n",_MALI_API_VERSION));
+ terminate_subsystems(); /* subsystems are responsible for their registered resources */
+ _mali_osk_term();
+
+ if (_MALI_OSK_ERR_OK != mali_platform_deinit(NULL))
+ {
+ MALI_PRINT(("Failed to deinit platform\n"));
+ }
+ MALI_DEBUG_PRINT(2, ("Module unloaded.\n"));
+}
+
+_mali_osk_errcode_t register_resources( _mali_osk_resource_t **arch_configuration, u32 num_resources )
+{
+ _mali_osk_resource_t *arch_resource = *arch_configuration;
+ u32 i;
+#if USING_MALI_PMM
+ u32 is_pmu_first_resource = 1;
+#endif /* USING_MALI_PMM */
+
+ /* loop over arch configuration */
+ for (i = 0; i < num_resources; ++i, arch_resource++)
+ {
+ if ( (arch_resource->type >= RESOURCE_TYPE_FIRST) &&
+ (arch_resource->type < RESOURCE_TYPE_COUNT) &&
+ (NULL != resource_handler[arch_resource->type])
+ )
+ {
+#if USING_MALI_PMM
+ if((arch_resource->type != PMU) && (is_pmu_first_resource == 1))
+ {
+ _mali_osk_resource_t mali_pmu_virtual_resource;
+ mali_pmu_virtual_resource.type = PMU;
+ mali_pmu_virtual_resource.description = "Virtual PMU";
+ mali_pmu_virtual_resource.base = 0x00000000;
+ mali_pmu_virtual_resource.cpu_usage_adjust = 0;
+ mali_pmu_virtual_resource.size = 0;
+ mali_pmu_virtual_resource.irq = 0;
+ mali_pmu_virtual_resource.flags = 0;
+ mali_pmu_virtual_resource.mmu_id = 0;
+ mali_pmu_virtual_resource.alloc_order = 0;
+ MALI_CHECK_NO_ERROR(resource_handler[mali_pmu_virtual_resource.type](&mali_pmu_virtual_resource));
+ }
+ is_pmu_first_resource = 0;
+#endif /* USING_MALI_PMM */
+
+ MALI_CHECK_NO_ERROR(resource_handler[arch_resource->type](arch_resource));
+ /* the subsystem shutdown process will release all the resources already registered */
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ("No handler installed for resource %s, type %d\n", arch_resource->description, arch_resource->type));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t initialize_subsystems(void)
+{
+ int i, j;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT; /* default error code */
+
+ MALI_CHECK_NON_NULL(system_info_lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0 ), _MALI_OSK_ERR_FAULT);
+
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->startup)
+ {
+ /* the subsystem has a startup function defined */
+ err = subsystems[i]->startup(i); /* the subsystem identifier is the offset in our subsystems array */
+ if (_MALI_OSK_ERR_OK != err) goto cleanup;
+ }
+ }
+
+ for (j = 0; j < (int)SUBSYSTEMS_COUNT; ++j)
+ {
+ if (NULL != subsystems[j]->load_complete)
+ {
+ /* the subsystem has a load_complete function defined */
+ err = subsystems[j]->load_complete(j);
+ if (_MALI_OSK_ERR_OK != err) goto cleanup;
+ }
+ }
+
+ /* All systems loaded and resources registered */
+ /* Build system info */
+ if (_MALI_OSK_ERR_OK != build_system_info()) goto cleanup;
+
+ MALI_SUCCESS; /* all ok */
+
+cleanup:
+ /* i is the index of the subsystem which failed to start; all indices before it have to be shut down */
+ for (i = i - 1; i >= 0; --i)
+ {
+ /* the subsystem identifier is the offset in our subsystems array */
+ /* Call any shutdown notification functions */
+ if (NULL != subsystems[i]->shutdown) subsystems[i]->shutdown(i);
+ }
+
+ _mali_osk_lock_term( system_info_lock );
+ MALI_ERROR(err); /* err is what the module which failed its startup returned, or the default */
+}
+
+static void terminate_subsystems(void)
+{
+ int i;
+ /* shut down subsystems in reverse order from startup */
+ for (i = SUBSYSTEMS_COUNT - 1; i >= 0; --i)
+ {
+ /* the subsystem identifier is the offset in our subsystems array */
+ if (NULL != subsystems[i]->shutdown) subsystems[i]->shutdown(i);
+ }
+ if (system_info_lock) _mali_osk_lock_term( system_info_lock );
+}
+
+void _mali_kernel_core_broadcast_subsystem_message(mali_core_notification_message message, u32 data)
+{
+ int i;
+
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->broadcast_notification)
+ {
+ subsystems[i]->broadcast_notification(message, data);
+ }
+ }
+}
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_setup(mali_kernel_subsystem_identifier id)
+{
+ mali_subsystem_core_id = id;
+
+ /* Register our own resources */
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MEM_VALIDATION, mali_kernel_core_resource_mem_validation));
+
+ /* parse the arch resource definition and tell all the subsystems */
+ /* this is why the core subsystem has to be specified last in the subsystem array */
+ MALI_CHECK_NO_ERROR(_mali_osk_resources_init(&arch_configuration, &num_resources));
+
+ MALI_CHECK_NO_ERROR(register_resources(&arch_configuration, num_resources));
+
+ /* resource parsing succeeded and the subsystems have correctly accepted their resources */
+ MALI_SUCCESS;
+}
+
+static void mali_kernel_subsystem_core_cleanup(mali_kernel_subsystem_identifier id)
+{
+ _mali_osk_resources_term(&arch_configuration, num_resources);
+}
+
+
+static _mali_osk_errcode_t build_system_info(void)
+{
+ unsigned int i;
+ int err = _MALI_OSK_ERR_FAULT;
+ _mali_system_info * new_info, * cleanup;
+ _mali_core_info * current_core;
+ _mali_mem_info * current_mem;
+ u32 new_size = 0;
+
+ /* create a new system info struct */
+ MALI_CHECK_NON_NULL(new_info = (_mali_system_info *)_mali_osk_malloc(sizeof(_mali_system_info)), _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(new_info, 0, sizeof(_mali_system_info));
+
+ /* if an error happens during any of the system_info_fill calls, clean up the new info structs */
+ cleanup = new_info;
+
+ /* ask each subsystems to fill in their info */
+ for (i = 0; i < SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->system_info_fill)
+ {
+ err = subsystems[i]->system_info_fill(new_info);
+ if (_MALI_OSK_ERR_OK != err) goto error_exit;
+ }
+ }
+
+ /* building succeeded, calculate the size */
+
+ /* size needed of the system info struct itself */
+ new_size = sizeof(_mali_system_info);
+
+ /* size needed for the cores */
+ for (current_core = new_info->core_info; NULL != current_core; current_core = current_core->next)
+ {
+ new_size += sizeof(_mali_core_info);
+ }
+
+ /* size needed for the memory banks */
+ for (current_mem = new_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
+ {
+ new_size += sizeof(_mali_mem_info);
+ }
+
+ /* lock system info access so a user won't get a corrupted version */
+ _mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* cleanup the old one */
+ cleanup = system_info;
+ /* set new info */
+ system_info = new_info;
+ system_info_size = new_size;
+
+ /* we're safe */
+ _mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* ok result */
+ err = _MALI_OSK_ERR_OK;
+
+ /* we share the cleanup routine with the error case */
+error_exit:
+ if (NULL == cleanup) MALI_ERROR((_mali_osk_errcode_t)err); /* no cleanup needed, return what err contains */
+
+ /* cleanup */
+
+ /* delete all the core info structs */
+ while (NULL != cleanup->core_info)
+ {
+ current_core = cleanup->core_info;
+ cleanup->core_info = cleanup->core_info->next;
+ _mali_osk_free(current_core);
+ }
+
+ /* delete all the mem info structs */
+ while (NULL != cleanup->mem_info)
+ {
+ current_mem = cleanup->mem_info;
+ cleanup->mem_info = cleanup->mem_info->next;
+ _mali_osk_free(current_mem);
+ }
+
+ /* delete the system info struct itself */
+ _mali_osk_free(cleanup);
+
+ /* return whatever err is, we could end up here in both the error and success cases */
+ MALI_ERROR((_mali_osk_errcode_t)err);
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check compatibility */
+ if ( args->version == _MALI_UK_API_VERSION )
+ {
+ args->compatible = 1;
+ }
+ else
+ {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_system_info_size(_mali_uk_get_system_info_size_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ args->size = system_info_size;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args )
+{
+ _mali_core_info * current_core;
+ _mali_mem_info * current_mem;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ void * current_write_pos, ** current_patch_pos;
+ u32 adjust_ptr_base;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_CHECK_NON_NULL(args->system_info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* lock the system info */
+ _mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* first check size */
+ if (args->size < system_info_size) goto exit_when_locked;
+
+ /* we build a copy of system_info in the user space buffer specified by the user and
+ * patch up the pointers. The ukk_private members of _mali_uk_get_system_info_s may
+ * indicate a different base address for patching the pointers (normally the
+ * address of the provided system_info buffer would be used). This is helpful when
+ * the system_info buffer needs to get copied to user space and the pointers need
+ * to be in user space.
+ */
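+ /* For instance (illustrative numbers): if this kernel-side buffer is at
+ * 0x1000 but ukk_private reports that the copy will live at user address
+ * 0x8000, then a core info written 0x40 bytes into the buffer is linked
+ * as 0x8000 + 0x40. After the buffer is copied out, the embedded
+ * pointers are valid user-space addresses without further fixup. */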
+ if (0 == args->ukk_private)
+ {
+ adjust_ptr_base = (u32)args->system_info;
+ }
+ else
+ {
+ adjust_ptr_base = args->ukk_private;
+ }
+
+ /* copy each struct into the buffer, and update its pointers */
+ current_write_pos = (void *)args->system_info;
+
+ /* first, the master struct */
+ _mali_osk_memcpy(current_write_pos, system_info, sizeof(_mali_system_info));
+
+ /* advance write pointer */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_system_info));
+
+ /* first we write the core info structs, patch starts at master's core_info pointer */
+ current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, core_info));
+
+ for (current_core = system_info->core_info; NULL != current_core; current_core = current_core->next)
+ {
+
+ /* patch the pointer pointing to this core */
+ *current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));
+
+ /* copy the core info */
+ _mali_osk_memcpy(current_write_pos, current_core, sizeof(_mali_core_info));
+
+ /* update patch pos */
+ current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_core_info, next));
+
+ /* advance write pos in memory */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_core_info));
+ }
+ /* patching of last patch pos is not needed, since we wrote NULL there in the first place */
+
+ /* then we write the mem info structs, patch starts at master's mem_info pointer */
+ current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, mem_info));
+
+ for (current_mem = system_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
+ {
+ /* patch the pointer pointing to this mem bank */
+ *current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));
+
+ /* copy the mem info */
+ _mali_osk_memcpy(current_write_pos, current_mem, sizeof(_mali_mem_info));
+
+ /* update patch pos */
+ current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_mem_info, next));
+
+ /* advance write pos in memory */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_mem_info));
+ }
+ /* patching of last patch pos is not needed, since we wrote NULL there in the first place */
+
+ err = _MALI_OSK_ERR_OK;
+exit_when_locked:
+ _mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+ MALI_ERROR(err);
+}
+
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args )
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_notification_t * notification;
+ _mali_osk_notification_queue_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ queue = (_mali_osk_notification_queue_t *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_core_id);
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue)
+ {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+ MALI_SUCCESS;
+ }
+
+ /* receive a notification, might sleep */
+ err = _mali_osk_notification_queue_receive(queue, &notification);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_ERROR(err); /* errcode returned, pass on to caller */
+ }
+
+ /* copy the buffer to the user */
+ args->type = (_mali_uk_notification_type)notification->notification_type;
+ _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+ /* finished with the notification */
+ _mali_osk_notification_delete( notification );
+
+ MALI_SUCCESS; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args )
+{
+ _mali_osk_notification_t * notification;
+ _mali_osk_notification_queue_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ queue = (_mali_osk_notification_queue_t *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_core_id);
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue)
+ {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ MALI_SUCCESS;
+ }
+
+ notification = _mali_osk_notification_create(args->type, 0);
+ if ( NULL == notification)
+ {
+ MALI_PRINT_ERROR( ("Failed to create notification object\n")) ;
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _mali_osk_notification_queue_send(queue, notification);
+
+ MALI_SUCCESS; /* all ok */
+}
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+ MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ info->drivermode = _MALI_DRIVER_MODE_NORMAL;
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_kernel_subsystem_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ MALI_CHECK_NON_NULL(slot, _MALI_OSK_ERR_INVALID_ARGS);
+ *slot = queue;
+ MALI_SUCCESS;
+}
+
+/* MEM_VALIDATION resource handler */
+static _mali_osk_errcode_t mali_kernel_core_resource_mem_validation(_mali_osk_resource_t * resource)
+{
+ /* Check that no other MEM_VALIDATION resources exist */
+ MALI_CHECK( INVALID_MEM == mem_validator.phys_base, _MALI_OSK_ERR_FAULT );
+
+ /* Check restrictions on page alignment */
+ MALI_CHECK( 0 == (resource->base & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == (resource->size & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == (resource->cpu_usage_adjust & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+
+ mem_validator.phys_base = resource->base;
+ mem_validator.size = resource->size;
+ mem_validator.cpu_usage_adjust = resource->cpu_usage_adjust;
+ MALI_DEBUG_PRINT( 2, ("Memory Validator '%s' installed for Mali physical address base==0x%08X, size==0x%08X, cpu_adjust==0x%08X\n",
+ resource->description, mem_validator.phys_base, mem_validator.size, mem_validator.cpu_usage_adjust ));
+ MALI_SUCCESS;
+}
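+
+/* For reference, a MEM_VALIDATION entry in the platform's resource table
+ * might look like the following sketch (all field values are made-up
+ * placeholders for a fixed framebuffer window):
+ *
+ *   { .type = MEM_VALIDATION,
+ *     .description = "Framebuffer",
+ *     .base = 0x5F000000,
+ *     .size = 0x01000000,
+ *     .cpu_usage_adjust = 0 },
+ *
+ * Once registered here, any process may map that physical range into Mali.
+ */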
+
+_mali_osk_errcode_t mali_kernel_core_translate_cpu_to_mali_phys_range( u32 *phys_base, u32 size )
+{
+ u32 mali_phys_base;
+
+ mali_phys_base = *phys_base - mem_validator.cpu_usage_adjust;
+
+ MALI_CHECK( 0 == ( mali_phys_base & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+
+ MALI_CHECK_NO_ERROR( mali_kernel_core_validate_mali_phys_range( mali_phys_base, size ) );
+
+ *phys_base = mali_phys_base;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_kernel_core_validate_mali_phys_range( u32 phys_base, u32 size )
+{
+ MALI_CHECK_GOTO( 0 == ( phys_base & (~_MALI_OSK_CPU_PAGE_MASK)), failure );
+ MALI_CHECK_GOTO( 0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK)), failure );
+
+ if ( phys_base >= mem_validator.phys_base
+ && (phys_base + size) >= mem_validator.phys_base
+ && phys_base <= (mem_validator.phys_base + mem_validator.size)
+ && (phys_base + size) <= (mem_validator.phys_base + mem_validator.size) )
+ {
+ MALI_SUCCESS;
+ }
+
+ failure:
+ MALI_PRINTF( ("*******************************************************************************\n") );
+ MALI_PRINTF( ("MALI PHYSICAL RANGE VALIDATION ERROR!\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("We failed to validate a Mali-Physical range that the user-side wished to map in\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("It is likely that the user-side wished to do Direct Rendering, but a suitable\n") );
+ MALI_PRINTF( ("address range validation mechanism has not been correctly setup\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_base, size) );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("Please refer to the ARM Mali Software Integration Guide for more information.\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("*******************************************************************************\n") );
+
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+}
+
+
+_mali_osk_errcode_t _mali_kernel_core_register_resource_handler(_mali_osk_resource_type_t type, mali_kernel_resource_registrator handler)
+{
+ MALI_CHECK(type < RESOURCE_TYPE_COUNT, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT(NULL == resource_handler[type]); /* A handler for resource already exists */
+ resource_handler[type] = handler;
+ MALI_SUCCESS;
+}
+
+void * mali_kernel_session_manager_slot_get(struct mali_session_data * session_data, int id)
+{
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ if(id >= SUBSYSTEMS_COUNT) { MALI_DEBUG_PRINT(3, ("mali_kernel_session_manager_slot_get: id %d out of range\n", id)); return NULL; }
+
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3, ("mali_kernel_session_manager_slot_get: got NULL session data\n")); return NULL; }
+ return session_data->subsystem_data[id];
+}
+
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+ int i;
+ _mali_osk_errcode_t err;
+ struct mali_session_data * session_data;
+
+ /* allocate a struct to track this session */
+ session_data = (struct mali_session_data *)_mali_osk_malloc(sizeof(struct mali_session_data));
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(session_data->subsystem_data, 0, sizeof(session_data->subsystem_data));
+
+ /* create a response queue for this session */
+ session_data->ioctl_queue = _mali_osk_notification_queue_init();
+ if (NULL == session_data->ioctl_queue)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+ /* call session_begin on all subsystems */
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->session_begin)
+ {
+ /* subsystem has a session_begin */
+ err = subsystems[i]->session_begin(session_data, &session_data->subsystem_data[i], session_data->ioctl_queue);
+ MALI_CHECK_GOTO(err == _MALI_OSK_ERR_OK, cleanup);
+ }
+ }
+
+ *context = (void*)session_data;
+
+ MALI_DEBUG_PRINT(3, ("Session started\n"));
+ MALI_SUCCESS;
+
+cleanup:
+ MALI_DEBUG_PRINT(2, ("Session startup failed\n"));
+ /* i is the index of the subsystem whose session_begin failed; all indices before it have to be ended */
+ /* end subsystem sessions in the reverse order they were started in */
+ for (i = i - 1; i >= 0; --i)
+ {
+ if (NULL != subsystems[i]->session_end) subsystems[i]->session_end(session_data, &session_data->subsystem_data[i]);
+ }
+
+ _mali_osk_notification_queue_term(session_data->ioctl_queue);
+ _mali_osk_free(session_data);
+
+ /* return what the subsystem which failed session start returned */
+ MALI_ERROR(err);
+}
+
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+ int i;
+ struct mali_session_data * session_data;
+
+ MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)*context;
+
+ MALI_DEBUG_PRINT(2, ("Session ending\n"));
+
+ /* end subsystem sessions in the reverse order they were started in */
+ for (i = SUBSYSTEMS_COUNT - 1; i >= 0; --i)
+ {
+ if (NULL != subsystems[i]->session_end) subsystems[i]->session_end(session_data, &session_data->subsystem_data[i]);
+ }
+
+ _mali_osk_notification_queue_term(session_data->ioctl_queue);
+ _mali_osk_free(session_data);
+
+ *context = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+
+ MALI_SUCCESS;
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t mali_core_signal_power_up( mali_pmm_core_id core, mali_bool queue_only )
+{
+ switch( core )
+ {
+ case MALI_PMM_CORE_GP:
+ MALI_CHECK_NO_ERROR(maligp_signal_power_up(queue_only));
+ break;
+#if defined USING_MALI400_L2_CACHE
+ case MALI_PMM_CORE_L2:
+ if( !queue_only )
+ {
+ /* Enable L2 cache due to power up */
+ mali_kernel_l2_cache_do_enable();
+
+ /* Invalidate the cache on power up */
+ MALI_DEBUG_PRINT(5, ("L2 Cache: Invalidate all\n"));
+ MALI_CHECK_NO_ERROR(mali_kernel_l2_cache_invalidate_all());
+ }
+ break;
+#endif
+ case MALI_PMM_CORE_PP0:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(0, queue_only));
+ break;
+ case MALI_PMM_CORE_PP1:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(1, queue_only));
+ break;
+ case MALI_PMM_CORE_PP2:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(2, queue_only));
+ break;
+ case MALI_PMM_CORE_PP3:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(3, queue_only));
+ break;
+ default:
+ /* Unknown core */
+ MALI_DEBUG_PRINT_ERROR( ("Unknown core signalled with power up: %d\n", core) );
+ MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+ }
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_core_signal_power_down( mali_pmm_core_id core, mali_bool immediate_only )
+{
+ switch( core )
+ {
+ case MALI_PMM_CORE_GP:
+ MALI_CHECK_NO_ERROR(maligp_signal_power_down(immediate_only));
+ break;
+#if defined USING_MALI400_L2_CACHE
+ case MALI_PMM_CORE_L2:
+ /* Nothing to do */
+ break;
+#endif
+ case MALI_PMM_CORE_PP0:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(0, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP1:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(1, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP2:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(2, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP3:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(3, immediate_only));
+ break;
+ default:
+ /* Unknown core */
+ MALI_DEBUG_PRINT_ERROR( ("Unknown core signalled with power down: %d\n", core) );
+ MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+ }
+
+ MALI_SUCCESS;
+}
+
+#endif
+
+
+
+#if MALI_STATE_TRACKING
+void _mali_kernel_core_dump_state(void)
+{
+ int i;
+ for (i = 0; i < SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->dump_state)
+ {
+ subsystems[i]->dump_state();
+ }
+ }
+#if USING_MALI_PMM
+ mali_pmm_dump_os_thread_state();
+#endif
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h
new file mode 100644
index 00000000000..3819ecc5e1c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_core.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+#if USING_MALI_PMM
+#include "mali_ukk.h"
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#endif
+
+_mali_osk_errcode_t mali_kernel_constructor( void );
+void mali_kernel_destructor( void );
+
+/**
+ * @brief Translate CPU physical to Mali physical addresses.
+ *
+ * This function is used to convert CPU physical addresses to Mali Physical
+ * addresses, such that _mali_ukk_map_external_mem may be used to map them
+ * into Mali. This will be used by _mali_ukk_va_to_mali_pa.
+ *
+ * This function only supports physically contiguous regions.
+ *
+ * A default implementation is provided, which uses a registered MEM_VALIDATION
+ * resource to do a static translation. Only an address range which will lie
+ * in the range specified by MEM_VALIDATION will be successfully translated.
+ *
+ * If a more complex, or non-static translation is required, then the
+ * implementor has the following options:
+ * - Rewrite this function to provide such a translation
+ * - Integrate the provider of the memory with UMP.
+ *
+ * @param[in,out] phys_base pointer to the page-aligned base address of the
+ * physical range to be translated
+ *
+ * @param[in] size size of the address range to be translated, which must be a
+ * multiple of the physical page size.
+ *
+ * @return on success, _MALI_OSK_ERR_OK and *phys_base is translated. If the
+ * cpu physical address range is not in the valid range, then a suitable
+ * _mali_osk_errcode_t error.
+ *
+ */
+_mali_osk_errcode_t mali_kernel_core_translate_cpu_to_mali_phys_range( u32 *phys_base, u32 size );
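+
+/* Usage sketch (illustrative; error handling and the variables are made up):
+ *
+ *   u32 phys = cpu_phys_of_buffer;
+ *   if (_MALI_OSK_ERR_OK ==
+ *       mali_kernel_core_translate_cpu_to_mali_phys_range(&phys, buf_size))
+ *   {
+ *       phys now holds the Mali physical base, ready to pass on to
+ *       _mali_ukk_map_external_mem
+ *   }
+ */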
+
+
+/**
+ * @brief Validate a Mali physical address range.
+ *
+ * This function is used to ensure that an address range passed to
+ * _mali_ukk_map_external_mem is allowed to be mapped into Mali.
+ *
+ * This function only supports physically contiguous regions.
+ *
+ * A default implementation is provided, which uses a registered MEM_VALIDATION
+ * resource to do a static translation. Only an address range which will lie
+ * in the range specified by MEM_VALIDATION will be successfully validated.
+ *
+ * If a more complex, or non-static validation is required, then the
+ * implementor has the following options:
+ * - Rewrite this function to provide such a validation
+ * - Integrate the provider of the memory with UMP.
+ *
+ * @param phys_base page-aligned base address of the Mali physical range to be
+ * validated.
+ *
+ * @param size size of the address range to be validated, which must be a
+ * multiple of the physical page size.
+ *
+ * @return _MALI_OSK_ERR_OK if the Mali physical range is valid. Otherwise, a
+ * suitable _mali_osk_errcode_t error.
+ *
+ */
+_mali_osk_errcode_t mali_kernel_core_validate_mali_phys_range( u32 phys_base, u32 size );
+
+#if USING_MALI_PMM
+/**
+ * @brief Signal a power up on a Mali core.
+ *
+ * This function flags a core as powered up.
+ * For PP and GP cores it calls functions that move the core from a power off
+ * queue into the idle queue ready to run jobs. It also tries to schedule any
+ * pending jobs to run on it.
+ *
+ * This function will fail if the core is not powered off - either running or
+ * already idle.
+ *
+ * @param core The PMM core id to power up.
+ * @param queue_only When MALI_TRUE only re-queue the core - do not reset.
+ *
+ * @return _MALI_OSK_ERR_OK if the core has been powered up. Otherwise a
+ * suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_core_signal_power_up( mali_pmm_core_id core, mali_bool queue_only );
+
+/**
+ * @brief Signal a power down on a Mali core.
+ *
+ * This function flags a core as powered down.
+ * For PP and GP cores it calls functions that move the core from an idle
+ * queue into the power off queue.
+ *
+ * This function will fail if the core is not idle - either running or
+ * already powered down.
+ *
+ * @param core The PMM core id to power down.
+ * @param immediate_only Do not set the core to pending power down if it can't
+ * power down immediately.
+ *
+ * @return _MALI_OSK_ERR_OK if the core has been powered down. Otherwise a
+ * suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_core_signal_power_down( mali_pmm_core_id core, mali_bool immediate_only );
+
+#endif
+
+/**
+ * Flag to indicate whether or not mali_benchmark is turned on.
+ */
+extern int mali_benchmark;
+
+
+#endif /* __MALI_KERNEL_CORE_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c
new file mode 100644
index 00000000000..45c280edd6a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
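+
+/* MALI_PAD_INT rounds its argument up to the next multiple of BITS_PER_LONG,
+ * so with BITS_PER_LONG == 32: MALI_PAD_INT(1) == 32, MALI_PAD_INT(32) == 32
+ * and MALI_PAD_INT(33) == 64. This keeps the usage bitmap a whole number of
+ * longs. */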
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(mali_descriptor_table * table);
+
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
+{
+ mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
+
+ init_entries = MALI_PAD_INT(init_entries);
+ max_entries = MALI_PAD_INT(max_entries);
+
+ if (NULL != map)
+ {
+ map->table = descriptor_table_alloc(init_entries);
+ if (NULL != map->table)
+ {
+#if !USING_MMU
+ map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 20);
+#else
+ map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 116);
+#endif
+ if (NULL != map->lock)
+ {
+ _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent the NULL/zero logic from kicking in */
+ map->max_nr_mappings_allowed = max_entries;
+ map->current_nr_mappings = init_entries;
+ return map;
+ }
+ descriptor_table_free(map->table);
+ }
+ _mali_osk_free(map);
+ }
+ return NULL;
+}
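+
+/* A typical lifecycle, as a sketch (the sizes and the mapped object are
+ * illustrative):
+ *
+ *   mali_descriptor_mapping *map = mali_descriptor_mapping_create(64, 4096);
+ *   int desc;
+ *   if (NULL != map &&
+ *       _MALI_OSK_ERR_OK == mali_descriptor_mapping_allocate_mapping(map, object, &desc))
+ *   {
+ *       hand 'desc' to user space; later resolve it with
+ *       mali_descriptor_mapping_get(map, desc, &ptr) and release it with
+ *       mali_descriptor_mapping_free(map, desc)
+ *   }
+ *   mali_descriptor_mapping_destroy(map);
+ */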
+
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map)
+{
+ descriptor_table_free(map->table);
+ _mali_osk_lock_term(map->lock);
+ _mali_osk_free(map);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *odescriptor)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ int new_descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(odescriptor);
+
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+ if (new_descriptor == map->current_nr_mappings)
+ {
+ /* no free descriptor, try to expand the table */
+		mali_descriptor_table * new_table, * old_table;
+		int old_nr_mappings = map->current_nr_mappings;
+		if (old_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
+
+		new_table = descriptor_table_alloc(old_nr_mappings + BITS_PER_LONG);
+		if (NULL == new_table) goto unlock_and_exit;
+
+		old_table = map->table;
+		/* copy only what the old table held; the new tail is already zeroed by the allocator */
+		_mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*old_nr_mappings) / BITS_PER_LONG);
+		_mali_osk_memcpy(new_table->mappings, old_table->mappings, old_nr_mappings * sizeof(void*));
+		map->current_nr_mappings = old_nr_mappings + BITS_PER_LONG;
+		map->table = new_table;
+		descriptor_table_free(old_table);
+ }
+
+ /* we have found a valid descriptor, set the value and usage bit */
+ _mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
+ map->table->mappings[new_descriptor] = target;
+ *odescriptor = new_descriptor;
+ err = _MALI_OSK_ERR_OK;
+
+unlock_and_exit:
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(err);
+}
+
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*))
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(callback);
+
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	/* id 0 is skipped as it's a reserved ID that doesn't map to anything */
+ for (i = 1; i < map->current_nr_mappings; ++i)
+ {
+ if (_mali_osk_test_bit(i, map->table->usage))
+ {
+ callback(i, map->table->mappings[i]);
+ }
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_ASSERT_POINTER(map);
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ *target = map->table->mappings[descriptor];
+ result = _MALI_OSK_ERR_OK;
+ }
+ else *target = NULL;
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = target;
+ result = _MALI_OSK_ERR_OK;
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+void mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor)
+{
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = NULL;
+ _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static mali_descriptor_table * descriptor_table_alloc(int count)
+{
+ mali_descriptor_table * table;
+
+ table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count));
+
+ if (NULL != table)
+ {
+ table->usage = (u32*)((u8*)table + sizeof(mali_descriptor_table));
+ table->mappings = (void**)((u8*)table + sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+ }
+
+ return table;
+}
+
+static void descriptor_table_free(mali_descriptor_table * table)
+{
+ _mali_osk_free(table);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h
new file mode 100644
index 00000000000..f626aa59455
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_descriptor_mapping.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_descriptor_mapping.h
+ */
+
+#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct mali_descriptor_table
+{
+ u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void** mappings; /**< Array of the pointers the descriptors map to */
+} mali_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct mali_descriptor_mapping
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ mali_descriptor_table * table; /**< Pointer to the current mapping table */
+} mali_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @param descriptor Pointer to a location that receives the allocated descriptor ID
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *descriptor);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Call the specified callback function for each descriptor in map.
+ * Entire function is mutex protected.
+ * @param map The map to do callbacks for
+ * @param callback A callback function which will be called for each entry in map
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*));
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor);
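+
+/*
+ * Editorial usage sketch (not part of the ARM sources; my_ptr and the local
+ * variable names are hypothetical):
+ *
+ *   mali_descriptor_mapping *map = mali_descriptor_mapping_create(64, 4096);
+ *   int fd;
+ *   void *back = NULL;
+ *   if (NULL != map &&
+ *       _MALI_OSK_ERR_OK == mali_descriptor_mapping_allocate_mapping(map, my_ptr, &fd))
+ *   {
+ *       mali_descriptor_mapping_get(map, fd, &back);  (back now equals my_ptr)
+ *       mali_descriptor_mapping_free(map, fd);        (fd may now be reused)
+ *   }
+ *   if (NULL != map) mali_descriptor_mapping_destroy(map);
+ */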
+
+#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h
new file mode 100644
index 00000000000..c2467edc677
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_gp.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_GP2_H__
+#define __MALI_KERNEL_GP2_H__
+
+extern struct mali_kernel_subsystem mali_subsystem_gp2;
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t maligp_signal_power_up( mali_bool queue_only );
+_mali_osk_errcode_t maligp_signal_power_down( mali_bool immediate_only );
+#endif
+
+#endif /* __MALI_KERNEL_GP2_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c
new file mode 100644
index 00000000000..fded5c93ab9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.c
@@ -0,0 +1,515 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+#include "mali_kernel_core.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_subsystem.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_kernel_l2_cache.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+ MALI400_L2_CACHE_REGISTER_STATUS = 0x0002,
+ /*unused = 0x0003 */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0004, /**< Misc cache commands, e.g. clear */
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0005,
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0006, /**< Limit of outstanding read requests */
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x0007, /**< Enable misc cache features */
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0008,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0009,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x000A,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x000B,
+} mali_l2_cache_register;
+
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command
+{
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
+ /* Read HW TRM carefully before adding/using other commands than the clear above */
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache enable flags
+ * These are the bits that can be set in the L2 cache enable register
+ */
+typedef enum mali_l2_cache_enable
+{
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status
+{
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
+} mali_l2_cache_status;
+
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+typedef struct mali_kernel_l2_cache_core
+{
+ unsigned long base; /**< Physical address of the registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ u32 mapping_size; /**< Size of registers in bytes */
+ _mali_osk_list_t list; /**< Used to link multiple cache cores into a list */
+ _mali_osk_lock_t *lock; /**< Serialize all L2 cache commands */
+} mali_kernel_l2_cache_core;
+
+
+#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+
+
+/**
+ * Mali L2 cache subsystem startup function
+ * Called by the driver core when the driver is loaded.
+ *
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * Mali L2 cache subsystem shutdown function
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ */
+static void mali_l2_cache_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * L2 cache subsystem complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources have been registered
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_load_complete(mali_kernel_subsystem_identifier id);
+
+/**
+ * Mali L2 cache subsystem's notification handler for Mali L2 cache resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each Mali L2 cache described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MALI400L2)
+ * @return 0 if the Mali L2 cache was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_core_create(_mali_osk_resource_t * resource);
+
+/**
+ * Write to a L2 cache register
+ * Writes the given value to the specified register
+ * @param unit The L2 cache to write to
+ * @param reg The register to write to
+ * @param val The value to write to the register
+ */
+static void mali_l2_cache_register_write(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg, u32 val);
+
+
+
+/**
+ * Invalidate specified L2 cache
+ * @param cache The L2 cache to invalidate
+ * @return 0 if Mali L2 cache was successfully invalidated, otherwise error
+ */
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all_cache(mali_kernel_l2_cache_core *cache);
+
+
+/*
+ The Mali L2 cache system's mali subsystem interface implementation.
+ We currently handle module and session life-time management.
+*/
+struct mali_kernel_subsystem mali_subsystem_l2_cache =
+{
+ mali_l2_cache_initialize, /**< startup */
+ mali_l2_cache_terminate, /**< shutdown */
+ mali_l2_cache_load_complete, /**< load_complete */
+ NULL, /**< system_info_fill */
+ NULL, /**< session_begin */
+ NULL, /**< session_end */
+ NULL, /**< broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /**< dump_state */
+#endif
+};
+
+
+
+static _MALI_OSK_LIST_HEAD(caches_head);
+
+
+
+
+/* called during module init */
+static _mali_osk_errcode_t mali_l2_cache_initialize(mali_kernel_subsystem_identifier id)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_IGNORE( id );
+
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system initializing\n"));
+
+ _MALI_OSK_INIT_LIST_HEAD(&caches_head);
+
+ /* This will register the function for adding Mali L2 cache cores to the subsystem */
+ err = _mali_kernel_core_register_resource_handler(MALI400L2, mali_l2_cache_core_create);
+
+ MALI_ERROR(err);
+}
+
+
+
+/* called if/when our module is unloaded */
+static void mali_l2_cache_terminate(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system terminating\n"));
+
+ /* loop over all L2 cache units and shut them down */
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list )
+ {
+ /* reset to defaults */
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
+
+		/* remove from the list of caches on the system */
+ _mali_osk_list_del( &cache->list );
+
+ /* release resources */
+ _mali_osk_mem_unmapioregion( cache->base, cache->mapping_size, cache->mapped_registers );
+ _mali_osk_mem_unreqregion( cache->base, cache->mapping_size );
+ _mali_osk_lock_term( cache->lock );
+ _mali_osk_free( cache );
+
+ #if USING_MALI_PMM
+ /* Unregister the L2 cache with the PMM */
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+ #endif
+ }
+}
+
+static _mali_osk_errcode_t mali_l2_cache_core_create(_mali_osk_resource_t * resource)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ mali_kernel_l2_cache_core * cache = NULL;
+
+ MALI_DEBUG_PRINT(2, ( "Creating Mali L2 cache: %s\n", resource->description));
+
+#if USING_MALI_PMM
+ /* Register the L2 cache with the PMM */
+ err = malipmm_core_register( MALI_PMM_CORE_L2 );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to register L2 cache unit with PMM"));
+ return err;
+ }
+#endif
+
+ err = _mali_osk_mem_reqregion( resource->base, MALI400_L2_CACHE_REGISTERS_SIZE, resource->description);
+
+ MALI_CHECK_GOTO( _MALI_OSK_ERR_OK == err, err_cleanup_requestmem_failed);
+
+ /* Reset error that might be passed out */
+ err = _MALI_OSK_ERR_FAULT;
+
+	/* use zeroed memory so the cleanup path can safely test the fields below */
+	cache = _mali_osk_calloc(1, sizeof(mali_kernel_l2_cache_core));
+
+ MALI_CHECK_GOTO( NULL != cache, err_cleanup);
+
+ cache->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 104 );
+
+ MALI_CHECK_GOTO( NULL != cache->lock, err_cleanup);
+
+ /* basic setup */
+ _MALI_OSK_INIT_LIST_HEAD(&cache->list);
+
+ cache->base = resource->base;
+ cache->mapping_size = MALI400_L2_CACHE_REGISTERS_SIZE;
+
+ /* map the registers */
+ cache->mapped_registers = _mali_osk_mem_mapioregion( cache->base, cache->mapping_size, resource->description );
+
+ MALI_CHECK_GOTO( NULL != cache->mapped_registers, err_cleanup);
+
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ err = mali_kernel_l2_cache_invalidate_all_cache(cache);
+
+ MALI_CHECK_GOTO( _MALI_OSK_ERR_OK == err, err_cleanup);
+
+ /* add to our list of L2 caches */
+ _mali_osk_list_add( &cache->list, &caches_head );
+
+ MALI_SUCCESS;
+
+err_cleanup:
+	/* This cleanup path is used when the register region has been requested successfully */
+
+ if ( NULL != cache )
+ {
+ if (NULL != cache->mapped_registers)
+ {
+ _mali_osk_mem_unmapioregion( cache->base, cache->mapping_size, cache->mapped_registers);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to map Mali L2 cache registers at 0x%08lX\n", cache->base));
+ }
+
+ if( NULL != cache->lock )
+ {
+ _mali_osk_lock_term( cache->lock );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to allocate a lock for handling a L2 cache unit"));
+ }
+
+ _mali_osk_free( cache );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to allocate memory for handling a L2 cache unit"));
+ }
+
+	/* The region request above succeeded, so it must always be reversed */
+ _mali_osk_mem_unreqregion( resource->base, MALI400_L2_CACHE_REGISTERS_SIZE);
+#if USING_MALI_PMM
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+#endif
+ return err;
+
+err_cleanup_requestmem_failed:
+ MALI_DEBUG_PRINT(1, ("Failed to request Mali L2 cache '%s' register address space at (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + MALI400_L2_CACHE_REGISTERS_SIZE - 1) );
+#if USING_MALI_PMM
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+#endif
+ return err;
+
+}
+
+
+static void mali_l2_cache_register_write(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg, u32 val)
+{
+ _mali_osk_mem_iowrite32(unit->mapped_registers, (u32)reg * sizeof(u32), val);
+}
+
+
+static u32 mali_l2_cache_register_read(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg)
+{
+ return _mali_osk_mem_ioread32(unit->mapped_registers, (u32)reg * sizeof(u32));
+}
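+
+/*
+ * Editorial note on the two accessors above: the register enum values are
+ * word indices, not byte offsets, so both accessors scale by sizeof(u32).
+ * For example, issuing a clear-all command:
+ *
+ *   mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+ *                                MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ *
+ * writes to byte offset 0x4 * 4 = 0x10 within the 0x30-byte register window.
+ */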
+
+void mali_kernel_l2_cache_do_enable(void)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and enable them*/
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
+ }
+}
+
+
+static _mali_osk_errcode_t mali_l2_cache_load_complete(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_l2_cache_do_enable();
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system load complete\n"));
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_send_command(mali_kernel_l2_cache_core *cache, u32 reg, u32 val)
+{
+ int i = 0;
+ const int loop_count = 100000;
+
+ /*
+ * Grab lock in order to send commands to the L2 cache in a serialized fashion.
+ * The L2 cache will ignore commands if it is busy.
+ */
+ _mali_osk_lock_wait(cache->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* First, wait for L2 cache command handler to go idle */
+
+ for (i = 0; i < loop_count; i++)
+ {
+ if (!(_mali_osk_mem_ioread32(cache->mapped_registers , (u32)MALI400_L2_CACHE_REGISTER_STATUS * sizeof(u32)) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY))
+ {
+ break;
+ }
+ }
+
+ if (i == loop_count)
+ {
+ _mali_osk_lock_signal(cache->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+ }
+
+ /* then issue the command */
+ mali_l2_cache_register_write(cache, reg, val);
+
+ _mali_osk_lock_signal(cache->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_SUCCESS;
+}
+
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all_cache(mali_kernel_l2_cache_core *cache)
+{
+ return mali_kernel_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+}
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all(void)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and invalidate them */
+
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ MALI_CHECK_NO_ERROR( mali_kernel_l2_cache_invalidate_all_cache(cache) );
+ }
+
+ MALI_SUCCESS;
+}
+
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page_cache(mali_kernel_l2_cache_core *cache, u32 page)
+{
+ return mali_kernel_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, page);
+}
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page(u32 page)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and invalidate them */
+
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ MALI_CHECK_NO_ERROR( mali_kernel_l2_cache_invalidate_page_cache(cache, page) );
+ }
+
+ MALI_SUCCESS;
+}
+
+
+void mali_kernel_l2_cache_set_perf_counters(u32 src0, u32 src1, int force_reset)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+ int reset0 = force_reset;
+ int reset1 = force_reset;
+ MALI_DEBUG_CODE(
+ int changed0 = 0;
+ int changed1 = 0;
+ )
+
+ /* loop over all L2 cache units and activate the counters on them */
+ _MALI_OSK_LIST_FOREACHENTRY(cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ u32 cur_src0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0);
+ u32 cur_src1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1);
+
+ if (src0 != cur_src0)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, src0);
+ MALI_DEBUG_CODE(changed0 = 1;)
+ reset0 = 1;
+ }
+
+ if (src1 != cur_src1)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, src1);
+ MALI_DEBUG_CODE(changed1 = 1;)
+ reset1 = 1;
+ }
+
+ if (reset0)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0);
+ }
+
+ if (reset1)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0);
+ }
+
+ MALI_DEBUG_PRINT(5, ("L2 cache counters set: SRC0=%u, CHANGED0=%d, RESET0=%d, SRC1=%u, CHANGED1=%d, RESET1=%d\n",
+ src0, changed0, reset0,
+ src1, changed1, reset1));
+ }
+}
+
+
+void mali_kernel_l2_cache_get_perf_counters(u32 *src0, u32 *val0, u32 *src1, u32 *val1)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+ int first_time = 1;
+ *src0 = 0;
+ *src1 = 0;
+ *val0 = 0;
+ *val1 = 0;
+
+ /* loop over all L2 cache units and read the counters */
+ _MALI_OSK_LIST_FOREACHENTRY(cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ u32 cur_src0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0);
+ u32 cur_src1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1);
+ u32 cur_val0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ u32 cur_val1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+
+ MALI_DEBUG_PRINT(5, ("L2 cache counters get: SRC0=%u, VAL0=%u, SRC1=%u, VAL1=%u\n", cur_src0, cur_val0, cur_src1, cur_val1));
+
+ if (first_time)
+ {
+ *src0 = cur_src0;
+ *src1 = cur_src1;
+ first_time = 0;
+ }
+
+ if (*src0 == cur_src0 && *src1 == cur_src1)
+ {
+ *val0 += cur_val0;
+ *val1 += cur_val1;
+ }
+ else
+ {
+			MALI_DEBUG_PRINT(1, ("Warning: Mali L2 caches have different performance counters set, not retrieving data\n"));
+ }
+ }
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h
new file mode 100644
index 00000000000..de3229a0688
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_l2_cache.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_subsystem.h"
+extern struct mali_kernel_subsystem mali_subsystem_l2_cache;
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all(void);
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page(u32 page);
+
+void mali_kernel_l2_cache_do_enable(void);
+void mali_kernel_l2_cache_set_perf_counters(u32 src0, u32 src1, int force_reset);
+void mali_kernel_l2_cache_get_perf_counters(u32 *src0, u32 *val0, u32 *src1, u32 *val1);
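+
+/*
+ * Editorial usage sketch (not part of the ARM sources): a caller that has
+ * just changed the physical pages backing a Mali mapping would typically do
+ *
+ *   _mali_osk_errcode_t err = mali_kernel_l2_cache_invalidate_all();
+ *   if (_MALI_OSK_ERR_OK != err) ...
+ *
+ * so that stale cache lines are never served for the remapped pages.
+ */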
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h
new file mode 100644
index 00000000000..681b6dd47c9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_H__
+#define __MALI_KERNEL_MEM_H__
+
+#include "mali_kernel_subsystem.h"
+extern struct mali_kernel_subsystem mali_subsystem_memory;
+
+#endif /* __MALI_KERNEL_MEM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c
new file mode 100644
index 00000000000..8277fd1e1c6
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_buddy.c
@@ -0,0 +1,1425 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_core.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_kernel_session_manager.h"
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#ifdef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+#include "mali_osk_indir_mmap.h"
+#endif
+
+/**
+ * Minimum memory allocation size
+ */
+#define MIN_BLOCK_SIZE (1024*1024UL)
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 4096
+
+/**
+ * Enums used to store multiple fields in one u32 to keep the memory block struct small
+ */
+enum MISC_SHIFT { MISC_SHIFT_FREE = 0, MISC_SHIFT_ORDER = 1, MISC_SHIFT_TOPLEVEL = 6 };
+enum MISC_MASK { MISC_MASK_FREE = 0x01, MISC_MASK_ORDER = 0x1F, MISC_MASK_TOPLEVEL = 0x1F };
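+
+/*
+ * Editorial note on the packing above: bit 0 holds the free flag, bits 1..5
+ * hold the block order and bits 6..10 hold the toplevel order. A free,
+ * order-21, toplevel-21 block would therefore store:
+ *
+ *   misc = (1 << MISC_SHIFT_FREE) | (21 << MISC_SHIFT_ORDER) | (21 << MISC_SHIFT_TOPLEVEL);
+ */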
+
+/* forward declaration of the block struct */
+struct mali_memory_block;
+
+/**
+ * Definition of memory bank type.
+ * Represents a memory bank (separate address space)
+ * Each bank keeps track of its block usage.
+ * A buddy system is used to track the usage.
+*/
+typedef struct mali_memory_bank
+{
+ _mali_osk_list_t list; /* links multiple banks together */
+ _mali_osk_lock_t *lock;
+ u32 base_addr; /* Mali seen address of bank */
+	u32 cpu_usage_adjust; /* Adjustment factor for what the CPU sees */
+ u32 size; /* the effective size */
+	u32 real_size; /* the real size of the bank, as given to the subsystem */
+ int min_order;
+ int max_order;
+ struct mali_memory_block * blocklist;
+ _mali_osk_list_t *freelist;
+ _mali_osk_atomic_t num_active_allocations;
+ u32 used_for_flags;
+ u32 alloc_order; /**< Order in which the bank will be used for allocations */
+ const char *name; /**< Descriptive name of the bank */
+} mali_memory_bank;
+
+/**
+ * Definition of the memory block type
+ * Represents a memory block, which is the smallest memory unit operated on.
+ * A block keeps info about its mapping, if in use by a user process
+ */
+typedef struct mali_memory_block
+{
+ _mali_osk_list_t link; /* used for freelist and process usage list*/
+ mali_memory_bank * bank; /* the bank it belongs to */
+ void __user * mapping; /* possible user space mapping of this block */
+	u32 misc; /* used while a block is free to track the number of blocks it represents */
+ int descriptor;
+ u32 mmap_cookie; /**< necessary for interaction with _mali_ukk_mem_mmap/munmap */
+} mali_memory_block;
+
+/**
+ * Definition of the type used to represent memory used by a session.
+ * Contains the head of the list of memory currently in use by a session.
+ */
+typedef struct memory_session
+{
+ _mali_osk_lock_t *lock;
+ _mali_osk_list_t memory_head; /* List of the memory blocks used by this session. */
+ mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+} memory_session;
+
+/*
+ Subsystem interface implementation
+*/
+/**
+ * Buddy block memory subsystem startup function
+ * Called by the driver core when the driver is loaded.
+ * Registers the memory systems ioctl handler, resource handlers and memory map function with the core.
+ *
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * Buddy block memory subsystem shutdown function
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up
+ * @param id Identifier assigned by the core to the memory subsystem
+ */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * Buddy block memory load complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources have been registered
+ * Reports on the memory resources registered
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id);
+
+
+/**
+ * Buddy block memory subsystem session begin notification
+ * Called by the core when a new session to the driver is started.
+ * Creates a memory session object and sets it as the subsystem slot data for this session
+ * @param slot Pointer to the slot to use for storing per-session data
+ * @param queue The user space event sink
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+/**
+ * Buddy block memory subsystem session end notification
+ * Called by the core when a session to the driver has ended.
+ * Cleans up per session data, which includes checking and fixing memory leaks
+ *
+ * @param slot Pointer to the slot to use for storing per-session data
+ */
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+/**
+ * Buddy block memory subsystem system info filler
+ * Called by the core when a system info update is needed
+ * We fill in info about all the memory types we have
+ * @param info Pointer to system info struct to update
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info);
+
+/* our registered resource handlers */
+/**
+ * Buddy block memory subsystem's notification handler for MEMORY resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each memory bank described in the active architecture's config.h file.
+ * Requests memory region ownership and calls backend.
+ * @param resource The resource to handle (type MEMORY)
+ * @return 0 if the memory was claimed and accepted, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_memory(_mali_osk_resource_t * resource);
+
+/**
+ * Buddy block memory subsystem's notification handler for MMU resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each mmu described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MMU)
+ * @return 0 if the MMU was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource);
+
+/**
+ * Buddy block memory subsystem's notification handler for FPGA_FRAMEWORK resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each fpga framework described in the active architecture's config.h file.
+ * @param resource The resource to handle (type FPGA_FRAMEWORK)
+ * @return 0 if the FPGA framework was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource);
+
+/* ioctl command implementations */
+/**
+ * Buddy block memory subsystem's handler for MALI_IOC_MEM_GET_BIG_BLOCK ioctl
+ * Called by the generic ioctl handler when the MALI_IOC_MEM_GET_BIG_BLOCK command is received.
+ * Finds an available memory block and maps into the current process' address space.
+ * @param ukk_private private word for use by the User/Kernel interface
+ * @param session_data Pointer to the per-session object which will track the memory usage
+ * @param argument The argument from the user. A pointer to a struct mali_dd_get_big_block in user space
+ * @return Zero if successful, a standard Linux error value on error (a negative value)
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args );
+
+/**
+ * Buddy block memory subsystem's handler for MALI_IOC_MEM_FREE_BIG_BLOCK ioctl
+ * Called by the generic ioctl handler when the MALI_IOC_MEM_FREE_BIG_BLOCK command is received.
+ * Unmaps the memory from the process' address space and marks the block as free.
+ * @param session_data Pointer to the per-session object which tracks the memory usage
+ * @param argument The argument from the user. A pointer to a struct mali_dd_get_big_block in user space
+ * @return Zero if successful, a standard Linux error value on error (a negative value)
+ */
+
+/* this static version allows us to make use of it while holding the memory_session lock.
+ * This is required for the session_end code */
+static _mali_osk_errcode_t _mali_ukk_free_big_block_internal( struct mali_session_data * mali_session_data, memory_session * session_data, _mali_uk_free_big_block_s *args);
+
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args );
+
+/**
+ * Buddy block memory subsystem's memory bank registration routine
+ * Called when a MEMORY resource has been found.
+ * The memory region has already been reserved for use by this driver.
+ * Create a bank object to represent this region and initialize its slots.
+ * @note Can only be called in a module-atomic scope, i.e. during module init, since no locking is performed
+ * @param phys_base Physical base address of this bank
+ * @param cpu_usage_adjust Adjustment factor for CPU seen address
+ * @param size Size of the bank in bytes
+ * @param flags Memory type bits
+ * @param alloc_order Order in which the bank will be used for allocations
+ * @param name descriptive name of the bank
+ * @return Zero on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_bank_register(u32 phys_base, u32 cpu_usage_adjust, u32 size, u32 flags, u32 alloc_order, const char *name);
+
+/**
+ * Get a block of mali memory of at least the given size and of the given type
+ * This is the backend for get_big_block.
+ * @param type_id The type id of memory requested.
+ * @param minimum_size The size requested
+ * @return Pointer to a block on success, NULL on failure
+ */
+static mali_memory_block * mali_memory_block_get(u32 type_id, u32 minimum_size);
+
+/**
+ * Get the mali seen address of the memory described by the block
+ * @param block The memory block to return the address of
+ * @return The mali seen address of the memory block
+ */
+MALI_STATIC_INLINE u32 block_mali_addr_get(mali_memory_block * block);
+
+/**
+ * Get the cpu seen address of the memory described by the block
+ * The cpu_usage_adjust is applied to the mali seen physical address
+ * @param block The memory block to return the address of
+ * @return The cpu seen address of the memory block
+ */
+MALI_STATIC_INLINE u32 block_cpu_addr_get(mali_memory_block * block);
+
+/**
+ * Get the size of the memory described by the given block
+ * @param block The memory block to return the size of
+ * @return The size of the memory block described by the object
+ */
+MALI_STATIC_INLINE u32 block_size_get(mali_memory_block * block);
+
+/**
+ * Get the user space accessible mapping of the memory described by the given memory block
+ * Returns a pointer in user space to the memory, if one has been created.
+ * @param block The memory block to return the mapping of
+ * @return User space pointer to cpu accessible memory or NULL if not mapped
+ */
+MALI_STATIC_INLINE void __user * block_mapping_get(mali_memory_block * block);
+
+/**
+ * Set the user space accessible mapping of the memory described by the given memory block.
+ * Sets the stored pointer to user space for the memory described by this block.
+ * @param block The memory block to set mapping info for
+ * @param ptr User space pointer to cpu accessible memory or NULL if not mapped
+ */
+MALI_STATIC_INLINE void block_mapping_set(mali_memory_block * block, void __user * ptr);
+
+/**
+ * Get the cookie for use with _mali_ukk_mem_munmap().
+ * @param block The memory block to get the cookie from
+ * @return the cookie. A return of 0 is still a valid cookie.
+ */
+MALI_STATIC_INLINE u32 block_mmap_cookie_get(mali_memory_block * block);
+
+/**
+ * Set the cookie returned via _mali_ukk_mem_mmap().
+ * @param block The memory block to set the cookie for
+ * @param cookie the cookie
+ */
+MALI_STATIC_INLINE void block_mmap_cookie_set(mali_memory_block * block, u32 cookie);
+
+
+/**
+ * Get a memory block's free status
+ * @param block The block to get the state of
+ */
+MALI_STATIC_INLINE u32 get_block_free(mali_memory_block * block);
+
+/**
+ * Set a memory block's free status
+ * @param block The block to set the state for
+ * @param state The state to set
+ */
+MALI_STATIC_INLINE void set_block_free(mali_memory_block * block, int state);
+
+/**
+ * Set a memory block's order
+ * @param block The block to set the order for
+ * @param order The order to set
+ */
+MALI_STATIC_INLINE void set_block_order(mali_memory_block * block, u32 order);
+
+/**
+ * Get a memory block's order
+ * @param block The block to get the order for
+ * @return The order this block exists on
+ */
+MALI_STATIC_INLINE u32 get_block_order(mali_memory_block * block);
+
+/**
+ * Tag a block as being a toplevel block.
+ * A toplevel block has no buddy and no parent
+ * @param block The block to tag as being toplevel
+ */
+MALI_STATIC_INLINE void set_block_toplevel(mali_memory_block * block, u32 level);
+
+/**
+ * Check if a block is a toplevel block
+ * @param block The block to check
+ * @return 1 if toplevel, 0 else
+ */
+MALI_STATIC_INLINE u32 get_block_toplevel(mali_memory_block * block);
+
+/**
+ * Checks if the given block is a buddy at the given order and that it's free
+ * @param block The block to check
+ * @param order The order to check against
+ * @return 0 if not valid, else 1
+ */
+MALI_STATIC_INLINE int block_is_valid_buddy(mali_memory_block * block, int order);
+
+/*
+ The buddy system uses the following rules to quickly find a block's buddy
+ and parent (the block representing this block at a higher order level):
+ - Given a block with index i, the block's buddy is at index i ^ (1 << order)
+ - Given a block with index i, the block's parent is at index i & ~(1 << order)
+*/
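+
+/*
+ * Editorial worked example: with a relative order of 1 (i.e. the current
+ * order minus bank->min_order), the block at index 5 (0b101) has
+ *
+ *   buddy  at 5 ^ (1 << 1)  == 7 (0b111)
+ *   parent at 5 & ~(1 << 1) == 5 (0b101)
+ *
+ * while its buddy, index 7, shares the same parent: 7 & ~(1 << 1) == 5.
+ */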
+
+/**
+ * Get a blocks buddy
+ * @param block The block to find the buddy for
+ * @param order The order to operate on
+ * @return Pointer to the buddy block
+ */
+MALI_STATIC_INLINE mali_memory_block * block_get_buddy(mali_memory_block * block, u32 order);
+
+/**
+ * Get a blocks parent
+ * @param block The block to find the parent for
+ * @param order The order to operate on
+ * @return Pointer to the parent block
+ */
+MALI_STATIC_INLINE mali_memory_block * block_get_parent(mali_memory_block * block, u32 order);
+
+/**
+ * Release mali memory
+ * Backend for free_big_block.
+ * Will release the mali memory described by the given block struct.
+ * @param block Memory block to free
+ */
+static void block_release(mali_memory_block * block);
+
+/* end interface implementation */
+
+/**
+ * List of all the memory banks registered with the subsystem.
+ * Access to this list is NOT synchronized since it's only
+ * written to during module init and termination.
+ */
+static _MALI_OSK_LIST_HEAD(memory_banks_list);
+
+/*
+ The buddy memory system's mali subsystem interface implementation.
+ We currently handle module and session life-time management.
+*/
+struct mali_kernel_subsystem mali_subsystem_memory =
+{
+ mali_memory_core_initialize, /* startup */
+ mali_memory_core_terminate, /* shutdown */
+ mali_memory_core_load_complete, /* load_complete */
+ mali_memory_core_system_info_fill, /* system_info_fill */
+ mali_memory_core_session_begin, /* session_begin */
+ mali_memory_core_session_end, /* session_end */
+ NULL, /* broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+};
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_memory_id = -1;
+
+/* called during module init */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&memory_banks_list);
+
+ mali_subsystem_memory_id = id;
+
+ /* register our handlers */
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MEMORY, mali_memory_core_resource_memory));
+
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MMU, mali_memory_core_resource_mmu));
+
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(FPGA_FRAMEWORK, mali_memory_core_resource_fpga));
+
+ MALI_SUCCESS;
+}
+
+/* called if/when our module is unloaded */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id)
+{
+ mali_memory_bank * bank, *temp;
+
+ /* loop over all memory banks to free them */
+ /* we use the safe version since we delete the current bank in the body */
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ MALI_DEBUG_CODE(int usage_count = _mali_osk_atomic_read(&bank->num_active_allocations));
+ /*
+ Report leaked memory
+ If this happens we have a bug in our session cleanup code.
+ */
+ MALI_DEBUG_PRINT_IF(1, 0 != usage_count, ("%d allocation(s) from memory bank at 0x%X still in use\n", usage_count, bank->base_addr));
+
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+
+ _mali_osk_lock_term(bank->lock);
+
+ /* unlink from bank list */
+ _mali_osk_list_del(&bank->list);
+
+ /* release kernel resources used by the bank */
+ _mali_osk_mem_unreqregion(bank->base_addr, bank->real_size);
+
+ /* remove all resources used to represent this bank*/
+ _mali_osk_free(bank->freelist);
+ _mali_osk_free(bank->blocklist);
+
+ /* destroy the bank object itself */
+ _mali_osk_free(bank);
+ }
+
+ /* No need to de-initialize mali_subsystem_memory_id - it could only be
+ * re-initialized to the same value */
+}
+
+/* load_complete handler */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id)
+{
+ mali_memory_bank * bank, *temp;
+
+	MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest number first):\n"));
+
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ if ( NULL != bank->name )
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", bank->alloc_order, bank->name) );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", bank->alloc_order ) );
+ }
+ }
+ MALI_SUCCESS;
+}
+
+MALI_STATIC_INLINE u32 order_needed_for_size(u32 size, struct mali_memory_bank * bank)
+{
+ u32 order = 0;
+
+ if (0 < size)
+ {
+ for ( order = sizeof(u32)*8 - 1; ((1UL<<order) & size) == 0; --order)
+ /* nothing */;
+
+		/* check if size is a power of two; if not we need to increment order by one */
+ if (0 != (size & ((1UL<<order)-1))) ++order;
+ }
+
+ if ((NULL != bank) && (order < bank->min_order)) order = bank->min_order;
+ /* Not capped to max order, that doesn't make sense */
+
+ return order;
+}
+
+MALI_STATIC_INLINE u32 maximum_order_which_fits(u32 size)
+{
+ u32 order = 0;
+ u32 powsize = 1;
+ while (powsize < size)
+ {
+ powsize <<= 1;
+ if (powsize > size) break;
+ order++;
+ }
+
+ return order;
+}
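+
+/*
+ * Editorial worked example for the two helpers above, assuming
+ * MIN_BLOCK_SIZE == 1MB (so bank->min_order == 20):
+ *
+ *   order_needed_for_size(3*1024*1024, bank) == 22  (3MB rounds up to 4MB)
+ *   maximum_order_which_fits(3*1024*1024)    == 21  (2MB is the largest power of two <= 3MB)
+ */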
+
+/* called for new MEMORY resources */
+static _mali_osk_errcode_t mali_memory_bank_register(u32 phys_base, u32 cpu_usage_adjust, u32 size, u32 flags, u32 alloc_order, const char *name)
+{
+ /* no locking performed due to function contract */
+ int i;
+ u32 left, offset;
+ mali_memory_bank * bank;
+ mali_memory_bank * bank_enum, *temp;
+
+ _mali_osk_errcode_t err;
+
+ /* Only a multiple of MIN_BLOCK_SIZE is usable */
+ u32 usable_size = size & ~(MIN_BLOCK_SIZE - 1);
+
+ /* handle zero sized banks and bank smaller than the fixed block size */
+ if (0 == usable_size)
+ {
+ MALI_PRINT(("Usable size == 0\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+	/* warn for banks not a multiple of the block size */
+ MALI_DEBUG_PRINT_IF(1, usable_size != size, ("Memory bank @ 0x%X not a multiple of minimum block size. %d bytes wasted\n", phys_base, size - usable_size));
+
+ /* check against previous registrations */
+ MALI_DEBUG_CODE(
+ {
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ /* duplicate ? */
+ if (bank->base_addr == phys_base)
+ {
+ MALI_PRINT(("Duplicate registration of a memory bank at 0x%X detected\n", phys_base));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ /* overlapping ? */
+ else if (
+ ( (phys_base > bank->base_addr) && (phys_base < (bank->base_addr + bank->real_size)) ) ||
+ ( (phys_base + size) > bank->base_addr && ((phys_base + size) < (bank->base_addr + bank->real_size)) )
+ )
+ {
+ MALI_PRINT(("Overlapping memory blocks found. Memory at 0x%X overlaps with memory at 0x%X size 0x%X\n", bank->base_addr, phys_base, size));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ }
+ }
+ );
+
+ /* create an object to represent this memory bank */
+ MALI_CHECK_NON_NULL(bank = (mali_memory_bank*)_mali_osk_malloc(sizeof(mali_memory_bank)), _MALI_OSK_ERR_NOMEM);
+
+ /* init the fields */
+ _MALI_OSK_INIT_LIST_HEAD(&bank->list);
+ bank->base_addr = phys_base;
+ bank->cpu_usage_adjust = cpu_usage_adjust;
+ bank->size = usable_size;
+ bank->real_size = size;
+ bank->alloc_order = alloc_order;
+ bank->name = name;
+
+ err = _mali_osk_atomic_init(&bank->num_active_allocations, 0);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ _mali_osk_free(bank);
+ MALI_ERROR(err);
+ }
+
+ bank->used_for_flags = flags;
+ bank->min_order = order_needed_for_size(MIN_BLOCK_SIZE, NULL);
+ bank->max_order = maximum_order_which_fits(usable_size);
+ bank->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0);
+ if (NULL == bank->lock)
+ {
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ bank->blocklist = _mali_osk_calloc(1, sizeof(struct mali_memory_block) * (usable_size / MIN_BLOCK_SIZE));
+ if (NULL == bank->blocklist)
+ {
+ _mali_osk_lock_term(bank->lock);
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ for (i = 0; i < (usable_size / MIN_BLOCK_SIZE); i++)
+ {
+ bank->blocklist[i].bank = bank;
+ }
+
+ bank->freelist = _mali_osk_calloc(1, sizeof(_mali_osk_list_t) * (bank->max_order - bank->min_order + 1));
+ if (NULL == bank->freelist)
+ {
+ _mali_osk_lock_term(bank->lock);
+ _mali_osk_free(bank->blocklist);
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ for (i = 0; i < (bank->max_order - bank->min_order + 1); i++) _MALI_OSK_INIT_LIST_HEAD(&bank->freelist[i]);
+
+ /* init slot info */
+ for (offset = 0, left = usable_size; offset < (usable_size / MIN_BLOCK_SIZE); /* updated inside the body */)
+ {
+ u32 block_order;
+ mali_memory_block * block;
+
+ /* the maximum order which fits in the remaining area */
+ block_order = maximum_order_which_fits(left);
+
+ /* find the block pointer */
+ block = &bank->blocklist[offset];
+
+ /* tag the block as being toplevel */
+ set_block_toplevel(block, block_order);
+
+ /* tag it as being free */
+ set_block_free(block, 1);
+
+ /* set the order */
+ set_block_order(block, block_order);
+
+ _mali_osk_list_addtail(&block->link, bank->freelist + (block_order - bank->min_order));
+
+ left -= (1 << block_order);
+ offset += ((1 << block_order) / MIN_BLOCK_SIZE);
+ }
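+
+	/*
+	 * Editorial worked example: a bank with 7MB usable and
+	 * MIN_BLOCK_SIZE == 1MB is carved by the loop above into three toplevel
+	 * blocks: 4MB (order 22) at offset 0, 2MB (order 21) at offset 4 and
+	 * 1MB (order 20) at offset 6.
+	 */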
+
+ /* add bank to list of banks on the system */
+ _MALI_OSK_LIST_FOREACHENTRY( bank_enum, temp, &memory_banks_list, mali_memory_bank, list )
+ {
+ if ( bank_enum->alloc_order >= alloc_order )
+ {
+ /* Found insertion point - our item must go before this one */
+ break;
+ }
+ }
+ _mali_osk_list_addtail(&bank->list, &bank_enum->list);
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_memory_mmu_register(u32 type, u32 phys_base)
+{
+ /* not supported */
+ return _MALI_OSK_ERR_INVALID_FUNC;
+}
+
+void mali_memory_mmu_unregister(u32 phys_base)
+{
+ /* not supported */
+ return;
+}
+
+static mali_memory_block * mali_memory_block_get(u32 type_id, u32 minimum_size)
+{
+ mali_memory_bank * bank;
+ mali_memory_block * block = NULL;
+ u32 requested_order, current_order;
+
+ /* input validation */
+ if (0 == minimum_size)
+ {
+ /* bad size */
+ MALI_DEBUG_PRINT(2, ("Zero size block requested by mali_memory_block_get\n"));
+ return NULL;
+ }
+
+ bank = (mali_memory_bank*)type_id;
+
+ requested_order = order_needed_for_size(minimum_size, bank);
+
+ MALI_DEBUG_PRINT(4, ("For size %d we need order %d (%d)\n", minimum_size, requested_order, 1 << requested_order));
+
+ _mali_osk_lock_wait(bank->lock, _MALI_OSK_LOCKMODE_RW);
+ /* ! critical section begin */
+
+ MALI_DEBUG_PRINT(7, ("Bank 0x%x locked\n", bank));
+
+ for (current_order = requested_order; current_order <= bank->max_order; ++current_order)
+ {
+ _mali_osk_list_t * list = bank->freelist + (current_order - bank->min_order);
+ MALI_DEBUG_PRINT(7, ("Checking freelist 0x%x for order %d\n", list, current_order));
+ if (0 != _mali_osk_list_empty(list)) continue; /* empty list */
+
+ MALI_DEBUG_PRINT(7, ("Found an entry on the freelist for order %d\n", current_order));
+
+
+ block = _MALI_OSK_LIST_ENTRY(list->next, mali_memory_block, link);
+ _mali_osk_list_delinit(&block->link);
+
+ while (current_order > requested_order)
+ {
+ mali_memory_block * buddy_block;
+ MALI_DEBUG_PRINT(7, ("Splitting block 0x%x\n", block));
+ current_order--;
+ list--;
+ buddy_block = block_get_buddy(block, current_order - bank->min_order);
+ set_block_order(buddy_block, current_order);
+ set_block_free(buddy_block, 1);
+ _mali_osk_list_add(&buddy_block->link, list);
+ }
+
+ set_block_order(block, current_order);
+ set_block_free(block, 0);
+
+ /* update usage count */
+ _mali_osk_atomic_inc(&bank->num_active_allocations);
+
+ break;
+ }
+
+ /* ! critical section end */
+ _mali_osk_lock_signal(bank->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_DEBUG_PRINT(7, ("Lock released for bank 0x%x\n", bank));
+
+ MALI_DEBUG_PRINT_IF(7, NULL != block, ("Block 0x%x allocated\n", block));
+
+ return block;
+}
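+
+/*
+ * Editorial note on the split loop above: a request for a 1MB block
+ * (order 20) served from a free 4MB block (order 22) splits twice. Each
+ * split pushes the upper half back onto the freelist one order below, so
+ * the freelists gain one order-21 and one order-20 buddy while the caller
+ * receives the remaining order-20 block.
+ */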
+
+
+static void block_release(mali_memory_block * block)
+{
+ mali_memory_bank * bank;
+ u32 current_order;
+
+ if (NULL == block) return;
+
+ bank = block->bank;
+
+ /* we're manipulating the free list, so we need to lock it */
+ _mali_osk_lock_wait(bank->lock, _MALI_OSK_LOCKMODE_RW);
+ /* ! critical section begin */
+
+ set_block_free(block, 1);
+ current_order = get_block_order(block);
+
+ while (current_order <= bank->max_order)
+ {
+ mali_memory_block * buddy_block;
+ buddy_block = block_get_buddy(block, current_order - bank->min_order);
+ if (!block_is_valid_buddy(buddy_block, current_order)) break;
+ _mali_osk_list_delinit(&buddy_block->link); /* remove from free list */
+ /* clear tracked data in both blocks */
+ set_block_order(block, 0);
+ set_block_free(block, 0);
+ set_block_order(buddy_block, 0);
+ set_block_free(buddy_block, 0);
+ /* make the parent control the new state */
+ block = block_get_parent(block, current_order - bank->min_order);
+ set_block_order(block, current_order + 1); /* merged has a higher order */
+ set_block_free(block, 1); /* mark it as free */
+ current_order++;
+ if (get_block_toplevel(block) == current_order) break; /* stop the merge if we've arrived at a toplevel block */
+ }
+
+ _mali_osk_list_add(&block->link, &bank->freelist[current_order - bank->min_order]);
+
+ /* update bank usage statistics */
+ _mali_osk_atomic_dec(&block->bank->num_active_allocations);
+
+ /* !critical section end */
+ _mali_osk_lock_signal(bank->lock, _MALI_OSK_LOCKMODE_RW);
+
+ return;
+}
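+
+/*
+ * Worked example of the merge loop above: freeing an order-5 block whose
+ * order-5 buddy is also free clears the tracking data of both halves,
+ * promotes their parent to order 6 and retries the merge at order 6.
+ * Merging stops as soon as the buddy is absent, in use or of a different
+ * order, or when the block reaches its recorded toplevel order; the
+ * resulting block is then put back on the matching freelist.
+ */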
+
+MALI_STATIC_INLINE u32 block_get_offset(mali_memory_block * block)
+{
+ return block - block->bank->blocklist;
+}
+
+MALI_STATIC_INLINE u32 block_mali_addr_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->bank->base_addr + MIN_BLOCK_SIZE * block_get_offset(block);
+ else return 0;
+}
+
+MALI_STATIC_INLINE u32 block_cpu_addr_get(mali_memory_block * block)
+{
+ if (NULL != block) return (block->bank->base_addr + MIN_BLOCK_SIZE * block_get_offset(block)) + block->bank->cpu_usage_adjust;
+ else return 0;
+}
+
+MALI_STATIC_INLINE u32 block_size_get(mali_memory_block * block)
+{
+ if (NULL != block) return 1 << get_block_order(block);
+ else return 0;
+}
+
+MALI_STATIC_INLINE void __user * block_mapping_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->mapping;
+ else return NULL;
+}
+
+MALI_STATIC_INLINE void block_mapping_set(mali_memory_block * block, void __user * ptr)
+{
+ if (NULL != block) block->mapping = ptr;
+}
+
+MALI_STATIC_INLINE u32 block_mmap_cookie_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->mmap_cookie;
+ else return 0;
+}
+
+/**
+ * Set the cookie returned via _mali_ukk_mem_mmap().
+ * @param block The memory block to set the cookie for
+ * @param cookie the cookie
+ */
+MALI_STATIC_INLINE void block_mmap_cookie_set(mali_memory_block * block, u32 cookie)
+{
+ if (NULL != block) block->mmap_cookie = cookie;
+}
+
+
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ if (NULL != *slot)
+ {
+ MALI_DEBUG_PRINT(1, ("The slot given to memory session begin already contains data"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ /* create the session data object */
+ MALI_CHECK_NON_NULL(session_data = _mali_osk_malloc(sizeof(memory_session)), _MALI_OSK_ERR_NOMEM);
+
+ /* create descriptor mapping table */
+ session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+ if (NULL == session_data->descriptor_mapping)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->memory_head); /* no memory in use */
+ session_data->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0);
+ if (NULL == session_data->lock)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ *slot = session_data; /* slot will point to our data object */
+
+ MALI_SUCCESS;
+}
+
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ return;
+ }
+
+ if (NULL == *slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL memory_session found in current session object"));
+ return;
+ }
+
+ _mali_osk_lock_wait(((memory_session*)*slot)->lock, _MALI_OSK_LOCKMODE_RW);
+ session_data = (memory_session *)*slot;
+ /* clear our slot */
+ *slot = NULL;
+
+ /*
+ First free all memory still being used.
+ This can happen if the caller has leaked memory or
+ the application has crashed forcing an auto-session end.
+ */
+ if (0 == _mali_osk_list_empty(&session_data->memory_head))
+ {
+ mali_memory_block * block, * temp;
+ MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+ /* use the _safe version since free_big_block removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(block, temp, &session_data->memory_head, mali_memory_block, link)
+ {
+ _mali_osk_errcode_t err;
+ _mali_uk_free_big_block_s uk_args;
+
+ MALI_DEBUG_PRINT(4, ("Freeing block 0x%x with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ block,
+ (void*)block_mali_addr_get(block),
+ block_size_get(block),
+ block_mapping_get(block))
+ );
+
+ /* free the block */
+ /** @note manual type safety check-point */
+ uk_args.ctx = mali_session_data;
+ uk_args.cookie = (u32)block->descriptor;
+ err = _mali_ukk_free_big_block_internal( mali_session_data, session_data, &uk_args );
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT_ERROR(("_mali_ukk_free_big_block_internal() failed during session termination on block with cookie==0x%X\n",
+ uk_args.cookie)
+ );
+ }
+ }
+ }
+
+ if (NULL != session_data->descriptor_mapping)
+ {
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ session_data->descriptor_mapping = NULL;
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_lock_term(session_data->lock);
+
+ /* free the session data object */
+ _mali_osk_free(session_data);
+
+ return;
+}
+
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info)
+{
+ mali_memory_bank * bank, *temp;
+ _mali_mem_info **mem_info_tail;
+
+ /* check input */
+ MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* make sure we won't leak any memory. It could also be that it's an uninitialized variable, but that would be a bug in the caller */
+ MALI_DEBUG_ASSERT(NULL == info->mem_info);
+
+ mem_info_tail = &info->mem_info;
+
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ _mali_mem_info * mem_info;
+
+ mem_info = (_mali_mem_info *)_mali_osk_calloc(1, sizeof(_mali_mem_info));
+ if (NULL == mem_info) return _MALI_OSK_ERR_NOMEM; /* memory already allocated will be freed by the caller */
+
+ /* set info */
+ mem_info->size = bank->size;
+ mem_info->flags = (_mali_bus_usage)bank->used_for_flags;
+ mem_info->maximum_order_supported = bank->max_order;
+ mem_info->identifier = (u32)bank;
+
+ /* add to system info linked list */
+ (*mem_info_tail) = mem_info;
+ mem_info_tail = &mem_info->next;
+ }
+
+ /* all OK */
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_memory(_mali_osk_resource_t * resource)
+{
+ _mali_osk_errcode_t err;
+
+ /* Request ownership of the memory */
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, resource->size, resource->description))
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to request memory region %s (0x%08X - 0x%08X)\n", resource->description, resource->base, resource->base + resource->size - 1));
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* call backend */
+ err = mali_memory_bank_register(resource->base, resource->cpu_usage_adjust, resource->size, resource->flags, resource->alloc_order, resource->description);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ /* if backend refused the memory we have to release the region again */
+ MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+ _mali_osk_mem_unreqregion(resource->base, resource->size);
+ MALI_ERROR(err);
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource)
+{
+ /* Not supported by the fixed block memory system */
+ MALI_DEBUG_PRINT(1, ("MMU resource not supported by non-MMU driver!\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_FUNC);
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource)
+{
+ mali_io_address mapping;
+
+ MALI_DEBUG_PRINT(5, ("FPGA framework '%s' @ (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + sizeof(u32) * 2 - 1
+ ));
+
+ mapping = _mali_osk_mem_mapioregion(resource->base + 0x1000, sizeof(u32) * 2, "fpga framework");
+ if (mapping)
+ {
+ u32 data;
+ data = _mali_osk_mem_ioread32(mapping, 0);
+ MALI_DEBUG_PRINT(2, ("FPGA framwork '%s' @ 0x%08X:\n", resource->description, resource->base));
+ MALI_DEBUG_PRINT(2, ("\tBitfile date: %d%02d%02d_%02d%02d\n",
+ (data >> 20),
+ (data >> 16) & 0xF,
+ (data >> 11) & 0x1F,
+ (data >> 6) & 0x1F,
+ (data >> 0) & 0x3F));
+ data = _mali_osk_mem_ioread32(mapping, sizeof(u32));
+ MALI_DEBUG_PRINT(2, ("\tBitfile SCCS rev: %d\n", data));
+
+ _mali_osk_mem_unmapioregion(resource->base + 0x1000, sizeof(u32) * 2, mapping);
+ }
+ else MALI_DEBUG_PRINT(1, ("Failed to access FPGA framwork '%s' @ 0x%08X\n", resource->description, resource->base));
+
+ MALI_SUCCESS;
+}
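+
+/*
+ * The first framework word read above apparently packs a build timestamp:
+ * bits [31:20] year, [19:16] month, [15:11] day, [10:6] hour and [5:0]
+ * minute, matching the YYYYMMDD_HHMM layout of the debug print. For
+ * example, the (hypothetical) value 0x7DABCB9E decodes as 20101125_1430.
+ */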
+
+/* static _mali_osk_errcode_t get_big_block(void * ukk_private, struct mali_session_data * mali_session_data, void __user * argument) */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args )
+{
+ _mali_uk_mem_mmap_s args_mmap = {0, };
+ int md;
+ mali_memory_block * block;
+ _mali_osk_errcode_t err;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER( args );
+
+ MALI_DEBUG_ASSERT_POINTER( args->ctx );
+
+ /** @note manual type safety check-point */
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (!args->type_id)
+ {
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* at least min block size */
+ if (MIN_BLOCK_SIZE > args->minimum_size_requested) args->minimum_size_requested = MIN_BLOCK_SIZE;
+
+ /* perform the actual allocation */
+ block = mali_memory_block_get(args->type_id, args->minimum_size_requested);
+ if ( NULL == block )
+ {
+ /* no memory available with requested type_id */
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, block, &md))
+ {
+ block_release(block);
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+ block->descriptor = md;
+
+
+ /* fill in response */
+ args->mali_address = block_mali_addr_get(block);
+ args->block_size = block_size_get(block);
+ args->cookie = (u32)md;
+ args->flags = block->bank->used_for_flags;
+
+ /* map the block into the process' address space */
+
+ /** @note manual type safety check-point */
+ args_mmap.ukk_private = (void *)args->ukk_private;
+ args_mmap.ctx = args->ctx;
+ args_mmap.size = args->block_size;
+ args_mmap.phys_addr = block_cpu_addr_get(block);
+
+#ifndef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+ err = _mali_ukk_mem_mmap( &args_mmap );
+#else
+ err = _mali_osk_specific_indirect_mmap( &args_mmap );
+#endif
+
+ /* check if the mapping failed */
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT(1, ("Memory mapping failed 0x%x\n", args->cpuptr));
+ /* mapping failed */
+
+ /* remove descriptor entry */
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+
+ /* free the mali memory */
+ block_release(block);
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+ }
+
+ args->cpuptr = args_mmap.mapping;
+ block_mmap_cookie_set(block, args_mmap.cookie);
+ block_mapping_set(block, args->cpuptr);
+
+ MALI_DEBUG_PRINT(2, ("Mali memory 0x%x (size %d) mapped in process memory space at 0x%x\n", (void*)args->mali_address, args->block_size, args->cpuptr));
+
+ /* track memory in use for the session */
+ _mali_osk_list_addtail(&block->link, &session_data->memory_head);
+
+ /* memory assigned to the session, memory mapped into the process' view */
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_SUCCESS;
+}
+
+/* Internal code that assumes the memory session lock is held */
+static _mali_osk_errcode_t _mali_ukk_free_big_block_internal( struct mali_session_data * mali_session_data, memory_session * session_data, _mali_uk_free_big_block_s *args)
+{
+ mali_memory_block * block = NULL;
+ _mali_osk_errcode_t err;
+ _mali_uk_mem_munmap_s args_munmap = {0,};
+
+ MALI_DEBUG_ASSERT_POINTER( mali_session_data );
+ MALI_DEBUG_ASSERT_POINTER( session_data );
+ MALI_DEBUG_ASSERT_POINTER( args );
+
+ err = mali_descriptor_mapping_get(session_data->descriptor_mapping, (int)args->cookie, (void**)&block);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release memory pages\n", (int)args->cookie));
+ MALI_ERROR(err);
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+ MALI_DEBUG_PRINT(4, ("Asked to free block 0x%x with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ block,
+ (void*)block_mali_addr_get(block),
+ block_size_get(block),
+ block_mapping_get(block))
+ );
+
+ /** @note manual type safety check-point */
+ args_munmap.ctx = (void*)mali_session_data;
+ args_munmap.mapping = block_mapping_get( block );
+ args_munmap.size = block_size_get( block );
+ args_munmap.cookie = block_mmap_cookie_get( block );
+
+#ifndef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+ _mali_ukk_mem_munmap( &args_munmap );
+#else
+ _mali_osk_specific_indirect_munmap( &args_munmap );
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Session data 0x%x, lock 0x%x\n", session_data, &session_data->lock));
+
+ /* unlink from session usage list */
+ MALI_DEBUG_PRINT(5, ("unlink from session usage list\n"));
+ _mali_osk_list_delinit(&block->link);
+
+ /* remove descriptor entry */
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, (int)args->cookie);
+
+ /* free the mali memory */
+ block_release(block);
+ MALI_DEBUG_PRINT(5, ("Block freed\n"));
+
+ MALI_SUCCESS;
+}
+
+/* static _mali_osk_errcode_t free_big_block( struct mali_session_data * mali_session_data, void __user * argument) */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args )
+{
+ _mali_osk_errcode_t err;
+ struct mali_session_data * mali_session_data;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER( args );
+
+ MALI_DEBUG_ASSERT_POINTER( args->ctx );
+
+ /** @note manual type safety check-point */
+ mali_session_data = (struct mali_session_data *)args->ctx;
+
+ /* Must always verify this, since these are provided by the user */
+ MALI_CHECK_NON_NULL(mali_session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /** @note this has been separated out so that the session_end handler can call this while it has the memory_session lock held */
+ err = _mali_ukk_free_big_block_internal( mali_session_data, session_data, args );
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ return err;
+}
+
+MALI_STATIC_INLINE u32 get_block_free(mali_memory_block * block)
+{
+ return (block->misc >> MISC_SHIFT_FREE) & MISC_MASK_FREE;
+}
+
+MALI_STATIC_INLINE void set_block_free(mali_memory_block * block, int state)
+{
+ if (state) block->misc |= (MISC_MASK_FREE << MISC_SHIFT_FREE);
+ else block->misc &= ~(MISC_MASK_FREE << MISC_SHIFT_FREE);
+}
+
+MALI_STATIC_INLINE void set_block_order(mali_memory_block * block, u32 order)
+{
+ block->misc &= ~(MISC_MASK_ORDER << MISC_SHIFT_ORDER);
+ block->misc |= ((order & MISC_MASK_ORDER) << MISC_SHIFT_ORDER);
+}
+
+MALI_STATIC_INLINE u32 get_block_order(mali_memory_block * block)
+{
+ return (block->misc >> MISC_SHIFT_ORDER) & MISC_MASK_ORDER;
+}
+
+MALI_STATIC_INLINE void set_block_toplevel(mali_memory_block * block, u32 level)
+{
+ block->misc |= ((level & MISC_MASK_TOPLEVEL) << MISC_SHIFT_TOPLEVEL);
+}
+
+MALI_STATIC_INLINE u32 get_block_toplevel(mali_memory_block * block)
+{
+ return (block->misc >> MISC_SHIFT_TOPLEVEL) & MISC_MASK_TOPLEVEL;
+}
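+
+/*
+ * The accessors above pack three fields into the block's single 'misc'
+ * word, using the MISC_SHIFT_ and MISC_MASK_ constants defined elsewhere
+ * in this file: a free flag, the block's current buddy order, and, for
+ * blocks heading a maximal region, the toplevel order at which merging
+ * must stop.
+ */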
+
+MALI_STATIC_INLINE int block_is_valid_buddy(mali_memory_block * block, int order)
+{
+ if (get_block_free(block) && (get_block_order(block) == order)) return 1;
+ else return 0;
+}
+
+MALI_STATIC_INLINE mali_memory_block * block_get_buddy(mali_memory_block * block, u32 order)
+{
+ return block + ( (block_get_offset(block) ^ (1 << order)) - block_get_offset(block));
+}
+
+MALI_STATIC_INLINE mali_memory_block * block_get_parent(mali_memory_block * block, u32 order)
+{
+ return block + ((block_get_offset(block) & ~(1 << order)) - block_get_offset(block));
+}
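+
+/*
+ * Buddy arithmetic sketch: for a block at blocklist offset 6 (binary 110)
+ * considered at order 1 (relative to the bank's min_order), the buddy is
+ * at offset 6 ^ (1 << 1) = 4 and the merged parent starts at offset
+ * 6 & ~(1 << 1) = 4. Buddy and parent coincide here because offset 6 is
+ * the upper half of the pair. The helpers above express this as a pointer
+ * adjustment relative to the current block instead of indexing the bank's
+ * blocklist directly.
+ */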
+
+/* This handler registered to mali_mmap for non-MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+ _mali_osk_errcode_t ret;
+ struct mali_session_data * mali_session_data;
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ /* Unpack arguments */
+ mali_session_data = (struct mali_session_data *)args->ctx;
+
+ if (NULL == mali_session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: mali_session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ MALI_DEBUG_ASSERT( mali_subsystem_memory_id >= 0 );
+
+ session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+ /* validate input */
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }
+
+ descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
+ if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }
+
+ descriptor->size = args->size;
+ descriptor->mali_address = args->phys_addr;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
+
+ ret = _mali_osk_mem_mapregion_init( descriptor );
+ if ( _MALI_OSK_ERR_OK != ret )
+ {
+ MALI_DEBUG_PRINT(3, ("_mali_osk_mem_mapregion_init() failed\n"));
+ _mali_osk_free(descriptor);
+ MALI_ERROR(ret);
+ }
+
+ ret = _mali_osk_mem_mapregion_map( descriptor, 0, &descriptor->mali_address, descriptor->size );
+ if ( _MALI_OSK_ERR_OK != ret )
+ {
+ MALI_DEBUG_PRINT(3, ("_mali_osk_mem_mapregion_map() failed\n"));
+ _mali_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+ MALI_ERROR(ret);
+ }
+
+ args->mapping = descriptor->mapping;
+
+ /**
+ * @note we do not require use of mali_descriptor_mapping here:
+ * the cookie gets stored in the mali_memory_block struct, which itself is
+ * protected by mali_descriptor_mapping, and so this cookie never leaves
+ * kernel space (on any OS).
+ *
+ * In the MMU case, we must use a mali_descriptor_mapping, since on _some_
+ * OSs, the cookie leaves kernel space.
+ */
+ args->cookie = (u32)descriptor;
+ MALI_SUCCESS;
+}
+
+/* This handler registered to mali_munmap for non-MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+ mali_memory_allocation * descriptor;
+
+ /** see note in _mali_ukk_mem_mmap() - no need to use descriptor mapping */
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* args->mapping and args->size are also discarded. They are only necessary for certain do_munmap implementations. However, they could be used to check the descriptor at this point. */
+ _mali_osk_mem_mapregion_unmap( descriptor, 0, descriptor->size, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_mem_mapregion_term( descriptor );
+
+ _mali_osk_free(descriptor);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c
new file mode 100644
index 00000000000..7a48e27ede7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.c
@@ -0,0 +1,2924 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_ioctl.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_kernel_mem_mmu.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_kernel_mem_os.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_core.h"
+
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+#include "ump_kernel_interface.h"
+#endif
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from an PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
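+
+/*
+ * Worked example of the address decomposition above: for the Mali virtual
+ * address 0x12345678, MALI_MMU_PDE_ENTRY() yields 0x048 (page directory
+ * slot), MALI_MMU_PTE_ENTRY() yields 0x345 (page table slot), and the low
+ * 12 bits (0x678) are the offset within the 4kB page. Each of the 1024
+ * directory entries therefore spans 4MB of virtual address space.
+ */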
+
+/**
+ * Newer Linux kernels have deprecated SA_SHIRQ in favour of IRQF_SHARED.
+ * This fallback handles older kernels which haven't made that swap yet.
+ */
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif /* IRQF_SHARED */
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+/**
+ * Used to prevent more than one core from using an MMU at the same time
+ *
+ * @note This value is hardwired into some systems' configuration files,
+ * which \em might not be a header file (e.g. some external data configuration
+ * file). Therefore, if this value is modified, its occurrence must be
+ * \b manually checked for in the entire driver source tree.
+ */
+#define MALI_MMU_DISALLOW_PARALLELL_WORK_OF_MALI_CORES 1
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * Page directory/page table entry flag bits
+ */
+typedef enum mali_mmu_entry_flags
+{
+ MALI_MMU_FLAGS_PRESENT = 0x01,
+ MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+ MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+ MALI_MMU_FLAGS_MASK = 0x07
+} mali_mmu_entry_flags;
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+ MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+ MALI_MMU_REGISTER_STATUS = 0x0001, /**< Status of the MMU */
+ MALI_MMU_REGISTER_COMMAND = 0x0002, /**< Command register, used to control the MMU */
+ MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x0003, /**< Logical address of the last page fault */
+ MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x0004, /**< Used to invalidate the mapping of a single page from the MMU */
+ MALI_MMU_REGISTER_INT_RAWSTAT = 0x0005, /**< Raw interrupt status, all interrupts visible */
+ MALI_MMU_REGISTER_INT_CLEAR = 0x0006, /**< Indicate to the MMU that the interrupt has been received */
+ MALI_MMU_REGISTER_INT_MASK = 0x0007, /**< Enable/disable types of interrupts */
+ MALI_MMU_REGISTER_INT_STATUS = 0x0008 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt
+{
+ MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+ MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ */
+typedef enum mali_mmu_command
+{
+ MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+ MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+ MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
+ MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+ MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+ MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+ MALI_MMU_COMMAND_SOFT_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+typedef enum mali_mmu_status_bits
+{
+ MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
+ MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
+ MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
+ MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the type used to represent memory used by a session.
+ * Contains the pointer to the huge user space virtual memory area
+ * used to access the Mali memory.
+ */
+typedef struct memory_session
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting the vm manipulation */
+
+ u32 mali_base_address; /**< Mali virtual memory area used by this session */
+ mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+
+ u32 page_directory; /**< Physical address of the memory session's page directory */
+
+ mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+ mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exist in the page directory, mapped into the kernel's address space */
+ u32 page_entries_usage_count[1024]; /**< Tracks the usage count of each page table page, so it can be released when the last reference is dropped */
+
+ _mali_osk_list_t active_mmus; /**< The MMUs in this session, in increasing order of ID (so we can lock them in the correct order when necessary) */
+ _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+} memory_session;
+
+typedef struct mali_kernel_memory_mmu_idle_callback
+{
+ _mali_osk_list_t link;
+ void (*callback)(void*);
+ void * callback_argument;
+} mali_kernel_memory_mmu_idle_callback;
+
+/**
+ * Definition of the MMU struct
+ * Used to track a MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+typedef struct mali_kernel_memory_mmu
+{
+ int id; /**< ID of the MMU, no duplicate IDs may exist on the system */
+ const char * description; /**< Description text received from the resource manager to help identify the resource for people */
+ int irq_nr; /**< IRQ number */
+ u32 base; /**< Physical address of the registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ u32 mapping_size; /**< Size of registers in bytes */
+ _mali_osk_list_t list; /**< Used to link multiple MMUs into a list */
+ _mali_osk_irq_t *irq;
+ u32 flags; /**< Flags any special properties of this MMU */
+
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the usage fields */
+ /* usage fields */
+ memory_session * active_session; /**< Active session, NULL if no session is active */
+ u32 usage_count; /**< Number of nested activations of the active session */
+ _mali_osk_list_t callbacks; /**< Callback registered for MMU idle notification */
+
+ int in_page_fault_handler;
+
+ _mali_osk_list_t session_link;
+} mali_kernel_memory_mmu;
+
+typedef struct dedicated_memory_info
+{
+ u32 base;
+ u32 size;
+ struct dedicated_memory_info * next;
+} dedicated_memory_info;
+
+/* types used for external_memory and ump_memory physical memory allocators, which are using the mali_allocation_engine */
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+typedef struct ump_mem_allocation
+{
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 initial_offset;
+ u32 size_allocated;
+ ump_dd_handle ump_mem;
+} ump_mem_allocation ;
+#endif
+
+typedef struct external_mem_allocation
+{
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 initial_offset;
+ u32 size;
+} external_mem_allocation;
+
+/*
+ Subsystem interface implementation
+*/
+/**
+ * MMU memory subsystem startup function.
+ * Called by the driver core when the driver is loaded.
+ * Registers the memory system's ioctl handler, resource handlers and memory map function with the core.
+ *
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * MMU memory subsystem shutdown function.
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up all resources the subsystem allocated.
+ * @param id Identifier assigned by the core to the memory subsystem
+ */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * MMU Memory load complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources have been registered
+ * Builds the overall memory list
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id);
+
+/**
+ * MMU memory subsystem session begin notification
+ * Called by the core when a new session to the driver is started.
+ * Creates a memory session object and sets it as the subsystem slot data for this session
+ * @param slot Pointer to the slot to use for storing per-session data
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+/**
+ * MMU memory subsystem session end notification
+ * Called by the core when a session to the driver has ended.
+ * Cleans up per session data, which includes checking and fixing memory leaks
+ *
+ * @param slot Pointer to the slot to use for storing per-session data
+ */
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+/**
+ * MMU memory subsystem system info filler
+ * Called by the core when a system info update is needed
+ * We fill in info about all the memory types we have
+ * @param info Pointer to system info struct to update
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info);
+
+/* our registered resource handlers */
+
+/**
+ * MMU memory subsystem's notification handler for MMU resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each mmu described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MMU)
+ * @return 0 if the MMU was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource);
+
+/**
+ * MMU memory subsystem's notification handler for FPGA_FRAMEWORK resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each fpga framework described in the active architecture's config.h file.
+ * @param resource The resource to handle (type FPGA_FRAMEWORK)
+ * @return 0 if the FPGA framework was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource);
+
+
+static _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource);
+static _mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource);
+
+/**
+ * @brief Internal function for unmapping memory
+ *
+ * Worker function for unmapping memory from a user-process. We assume that the
+ * session/descriptor's lock was obtained before entry. For example, the
+ * wrapper _mali_ukk_mem_munmap() will lock the descriptor, then call this
+ * function to do the actual unmapping. mali_memory_core_session_end() could
+ * also call this directly (depending on compilation options), having locked
+ * the descriptor.
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_uk_types.h"
+ */
+static void _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args );
+
+/**
+ * The MMU interrupt handler
+ * Upper half of the MMU interrupt processing.
+ * Called by the kernel when the MMU has triggered an interrupt.
+ * The interrupt handler supports IRQ sharing, so it probes the MMU in question to see whether it raised the interrupt.
+ * @param irq The irq number (not used)
+ * @param dev_id Points to the MMU object being handled
+ * @param regs Registers of interrupted process (not used)
+ * @return Standard Linux interrupt result.
+ * Subset used by the driver: IRQ_HANDLED (processed),
+ * IRQ_NONE (not processed)
+ */
+static _mali_osk_errcode_t mali_kernel_memory_mmu_interrupt_handler_upper_half(void * data);
+
+/**
+ * The MMU reset handler
+ * Bottom half of the MMU interrupt processing for page faults and bus errors
+ * @param work The item to operate on, NULL in our case
+ */
+static void mali_kernel_memory_mmu_interrupt_handler_bottom_half ( void *data );
+
+/**
+ * Read MMU register value
+ * Reads the contents of the specified register.
+ * @param unit The MMU to read from
+ * @param reg The register to read
+ * @return The contents of the register
+ */
+static u32 mali_mmu_register_read(mali_kernel_memory_mmu * unit, mali_mmu_register reg);
+
+/**
+ * Write to a MMU register
+ * Writes the given value to the specified register
+ * @param unit The MMU to write to
+ * @param reg The register to write to
+ * @param val The value to write to the register
+ */
+static void mali_mmu_register_write(mali_kernel_memory_mmu * unit, mali_mmu_register reg, u32 val);
+
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+static void ump_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0*/
+
+
+static void external_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+
+
+
+
+/* nop functions */
+
+/* mali address manager needs to allocate page tables on allocate, write to page table(s) on map, write to page table(s) and release page tables on release */
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor); /* validates the range, allocates memory for the page tables if needed */
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+static void mali_address_manager_release(mali_memory_allocation * descriptor);
+
+static void mali_mmu_activate_address_space(mali_kernel_memory_mmu * mmu, u32 page_directory);
+
+_mali_osk_errcode_t mali_mmu_page_table_cache_create(void);
+void mali_mmu_page_table_cache_destroy(void);
+
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping);
+void mali_mmu_release_table_page(u32 pa);
+
+static _mali_osk_errcode_t mali_allocate_empty_page_directory(void);
+
+static void mali_free_empty_page_directory(void);
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+static _mali_osk_errcode_t mali_allocate_fault_flush_pages(void);
+
+static void mali_free_fault_flush_pages(void);
+
+static void mali_mmu_probe_irq_trigger(mali_kernel_memory_mmu * mmu);
+static _mali_osk_errcode_t mali_mmu_probe_irq_acknowledge(mali_kernel_memory_mmu * mmu);
+
+/* MMU variables */
+
+typedef struct mali_mmu_page_table_allocation
+{
+ _mali_osk_list_t list;
+ u32 * usage_map;
+ u32 usage_count;
+ u32 num_pages;
+ mali_page_table_block pages;
+} mali_mmu_page_table_allocation;
+
+typedef struct mali_mmu_page_table_allocations
+{
+ _mali_osk_lock_t *lock;
+ _mali_osk_list_t partial;
+ _mali_osk_list_t full;
+ /* we never hold on to an empty allocation */
+} mali_mmu_page_table_allocations;
+
+/* Head of the list of MMUs */
+static _MALI_OSK_LIST_HEAD(mmu_head);
+
+/* the mmu page table cache */
+static struct mali_mmu_page_table_allocations page_table_cache;
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
+static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
+static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static u32 mali_empty_page_directory = MALI_INVALID_PAGE;
+
+/*
+ The MMU memory system's mali subsystem interface implementation.
+ We currently handle module and session life-time management.
+*/
+struct mali_kernel_subsystem mali_subsystem_memory =
+{
+ mali_memory_core_initialize, /* startup */
+ mali_memory_core_terminate, /* shutdown */
+ mali_memory_core_load_complete, /* load_complete */
+ mali_memory_core_system_info_fill, /* system_info_fill */
+ mali_memory_core_session_begin, /* session_begin */
+ mali_memory_core_session_end, /* session_end */
+ NULL, /* broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+};
+
+static mali_kernel_mem_address_manager mali_address_manager =
+{
+ mali_address_manager_allocate, /* allocate */
+ mali_address_manager_release, /* release */
+ mali_address_manager_map, /* map_physical */
+ NULL /* unmap_physical not present*/
+};
+
+static mali_kernel_mem_address_manager process_address_manager =
+{
+ _mali_osk_mem_mapregion_init, /* allocate */
+ _mali_osk_mem_mapregion_term, /* release */
+ _mali_osk_mem_mapregion_map, /* map_physical */
+ _mali_osk_mem_mapregion_unmap /* unmap_physical */
+};
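+
+/*
+ * Two address managers are handed to the allocation engine created below:
+ * mali_address_manager programs the session's MMU page tables, while
+ * process_address_manager maps the same physical memory into the user
+ * process through the OSK mapregion calls, presumably so that a single
+ * allocation can be made visible both to the Mali cores and to the
+ * user-space process.
+ */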
+
+static mali_allocation_engine memory_engine = NULL;
+static mali_physical_memory_allocator * physical_memory_allocators = NULL;
+
+static dedicated_memory_info * mem_region_registrations = NULL;
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_memory_id = (mali_kernel_subsystem_identifier)-1;
+
+/* called during module init */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id)
+{
+ MALI_DEBUG_PRINT(2, ("MMU memory system initializing\n"));
+
+ /* save our subsystem id for later for use in slot lookup during session activation */
+ mali_subsystem_memory_id = id;
+
+ _MALI_OSK_INIT_LIST_HEAD(&mmu_head);
+
+ MALI_CHECK_NO_ERROR( mali_mmu_page_table_cache_create() );
+
+ /* register our handlers */
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(MMU, mali_memory_core_resource_mmu) );
+
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(FPGA_FRAMEWORK, mali_memory_core_resource_fpga) );
+
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(MEMORY, mali_memory_core_resource_dedicated_memory) );
+
+ MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(OS_MEMORY, mali_memory_core_resource_os_memory) );
+
+ memory_engine = mali_allocation_engine_create(&mali_address_manager, &process_address_manager);
+ MALI_CHECK_NON_NULL( memory_engine, _MALI_OSK_ERR_FAULT);
+
+ MALI_SUCCESS;
+}
+
+/* called if/when our module is unloaded */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_memory_mmu * mmu, *temp_mmu;
+
+ MALI_DEBUG_PRINT(2, ("MMU memory system terminating\n"));
+
+ /* loop over all MMU units and shut them down */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
+ {
+ /* reset to defaults */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+
+ /* unregister the irq */
+ _mali_osk_irq_term(mmu->irq);
+
+ /* remove from the list of MMUs on the system */
+ _mali_osk_list_del(&mmu->list);
+
+ /* release resources */
+ _mali_osk_mem_unmapioregion(mmu->base, mmu->mapping_size, mmu->mapped_registers);
+ _mali_osk_mem_unreqregion(mmu->base, mmu->mapping_size);
+ _mali_osk_free(mmu);
+ }
+
+ /* free global helper pages */
+ mali_free_empty_page_directory();
+ mali_free_fault_flush_pages();
+
+ /* destroy the page table cache before shutting down backends in case we have a page table leak to report */
+ mali_mmu_page_table_cache_destroy();
+
+ while ( NULL != mem_region_registrations)
+ {
+ dedicated_memory_info * m;
+ m = mem_region_registrations;
+ mem_region_registrations = m->next;
+ _mali_osk_mem_unreqregion(m->base, m->size);
+ _mali_osk_free(m);
+ }
+
+ while ( NULL != physical_memory_allocators)
+ {
+ mali_physical_memory_allocator * m;
+ m = physical_memory_allocators;
+ physical_memory_allocators = m->next;
+ m->destroy(m);
+ }
+
+ if (NULL != memory_engine)
+ {
+ mali_allocation_engine_destroy(memory_engine);
+ memory_engine = NULL;
+ }
+
+}
+
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ memory_session * session_data;
+ _mali_osk_errcode_t err;
+ int i;
+ mali_io_address pd_mapped;
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ if (NULL != *slot)
+ {
+ MALI_DEBUG_PRINT(1, ("The slot given to memory session begin already contains data"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_DEBUG_PRINT(2, ("MMU session begin\n"));
+
+ /* create the session data object */
+ session_data = _mali_osk_calloc(1, sizeof(memory_session));
+ MALI_CHECK_NON_NULL( session_data, _MALI_OSK_ERR_NOMEM );
+
+ /* create descriptor mapping table */
+ session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+ if (NULL == session_data->descriptor_mapping)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ err = mali_mmu_get_table_page(&session_data->page_directory, &pd_mapped);
+
+ session_data->page_directory_mapped = pd_mapped;
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ _mali_osk_free(session_data);
+ MALI_ERROR(err);
+ }
+ MALI_DEBUG_ASSERT_POINTER( session_data->page_directory_mapped );
+
+ MALI_DEBUG_PRINT(2, ("Page directory for session 0x%x placed at physical address 0x%08X\n", mali_session_data, session_data->page_directory));
+
+ for (i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
+ {
+ /* mark each page table as not present */
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, sizeof(u32) * i, 0);
+ }
+
+ /* page_entries_mapped[] is already set to NULL by the _mali_osk_calloc call */
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->active_mmus);
+ session_data->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 128);
+ if (NULL == session_data->lock)
+ {
+ mali_mmu_release_table_page(session_data->page_directory);
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Init the session's memory allocation list */
+ _MALI_OSK_INIT_LIST_HEAD( &session_data->memory_head );
+
+ *slot = session_data; /* slot will point to our data object */
+ MALI_DEBUG_PRINT(2, ("MMU session begin: success\n"));
+ MALI_SUCCESS;
+}
+
+static void descriptor_table_cleanup_callback(int descriptor_id, void* map_target)
+{
+ mali_memory_allocation * descriptor;
+
+ descriptor = (mali_memory_allocation*)map_target;
+
+ MALI_DEBUG_PRINT(1, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));
+ MALI_DEBUG_ASSERT(descriptor);
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+}
+
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ memory_session * session_data;
+ int i;
+ const int num_page_table_entries = sizeof(session_data->page_entries_mapped) / sizeof(session_data->page_entries_mapped[0]);
+
+ MALI_DEBUG_PRINT(2, ("MMU session end\n"));
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ return;
+ }
+
+ session_data = (memory_session *)*slot;
+
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+ return;
+ }
+ /* Lock the session so we can modify the memory list */
+ _mali_osk_lock_wait( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+ /* Non-interruptible lock type, so the lock is always acquired; error checking is handled in the OSK function. */
+
+#ifndef MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
+#if _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+#error Indirect MMAP specified, but UKK does not have implicit MMAP cleanup. Current implementation does not handle this.
+#else
+
+ /* Free all memory engine allocations */
+ if (0 == _mali_osk_list_empty(&session_data->memory_head))
+ {
+ mali_memory_allocation *descriptor;
+ mali_memory_allocation *temp;
+ _mali_uk_mem_munmap_s unmap_args;
+
+ MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+ unmap_args.ctx = mali_session_data;
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->memory_head, mali_memory_allocation, list)
+ {
+ MALI_DEBUG_PRINT(4, ("Freeing block with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ descriptor->mali_address, descriptor->size, descriptor->mapping)
+ );
+ /* ASSERT that the descriptor's lock references the correct thing */
+ MALI_DEBUG_ASSERT( descriptor->lock == session_data->lock );
+ /* Therefore, we have already locked the descriptor */
+
+ unmap_args.size = descriptor->size;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.cookie = (u32)descriptor;
+
+ /*
+ * This removes the descriptor from the list, and frees the descriptor
+ *
+ * Does not handle the _MALI_OSK_SPECIFIC_INDIRECT_MMAP case, since
+ * the only OS we are aware of that requires indirect MMAP also has
+ * implicit mmap cleanup.
+ */
+ _mali_ukk_mem_munmap_internal( &unmap_args );
+ }
+ }
+
+ /* Assert that we really did free everything */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty(&session_data->memory_head) );
+#endif /* _MALI_OSK_SPECIFIC_INDIRECT_MMAP */
+#endif /* MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP */
+
+ if (NULL != session_data->descriptor_mapping)
+ {
+ mali_descriptor_mapping_call_for_each(session_data->descriptor_mapping, descriptor_table_cleanup_callback);
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ session_data->descriptor_mapping = NULL;
+ }
+
+ for (i = 0; i < num_page_table_entries; i++)
+ {
+ /* free PTE memory */
+ if (session_data->page_directory_mapped && (_mali_osk_mem_ioread32(session_data->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT))
+ {
+ mali_mmu_release_table_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0);
+ }
+ }
+
+ if (MALI_INVALID_PAGE != session_data->page_directory)
+ {
+ mali_mmu_release_table_page(session_data->page_directory);
+ session_data->page_directory = MALI_INVALID_PAGE;
+ }
+
+ _mali_osk_lock_signal( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+
+ /**
+ * @note Could the VMA close handler mean that we use the session data after it has been freed?
+ * If so, we would need to refcount the session data and free it on VMA close.
+ */
+
+ /* Free the lock */
+ _mali_osk_lock_term( session_data->lock );
+ /* free the session data object */
+ _mali_osk_free(session_data);
+
+ /* clear our slot */
+ *slot = NULL;
+
+ return;
+}
+
+static _mali_osk_errcode_t mali_allocate_empty_page_directory(void)
+{
+ _mali_osk_errcode_t err;
+ mali_io_address mapping;
+
+ MALI_CHECK_NO_ERROR(mali_mmu_get_table_page(&mali_empty_page_directory, &mapping));
+
+ MALI_DEBUG_ASSERT_POINTER( mapping );
+
+ err = fill_page(mapping, 0);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ mali_mmu_release_table_page(mali_empty_page_directory);
+ mali_empty_page_directory = MALI_INVALID_PAGE;
+ }
+ return err;
+}
+
+static void mali_free_empty_page_directory(void)
+{
+ if (MALI_INVALID_PAGE != mali_empty_page_directory)
+ {
+ mali_mmu_release_table_page(mali_empty_page_directory);
+ mali_empty_page_directory = MALI_INVALID_PAGE;
+ }
+}
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+ int i;
+ MALI_DEBUG_ASSERT_POINTER( mapping );
+
+ for(i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
+ {
+ _mali_osk_mem_iowrite32( mapping, i * sizeof(u32), data);
+ }
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_allocate_fault_flush_pages(void)
+{
+ _mali_osk_errcode_t err;
+
+ err = mali_mmu_get_table_page(&mali_page_fault_flush_data_page, &mali_page_fault_flush_data_page_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ err = mali_mmu_get_table_page(&mali_page_fault_flush_page_table, &mali_page_fault_flush_page_table_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ err = mali_mmu_get_table_page(&mali_page_fault_flush_page_directory, &mali_page_fault_flush_page_directory_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ fill_page(mali_page_fault_flush_data_page_mapping, 0);
+ fill_page(mali_page_fault_flush_page_table_mapping, mali_page_fault_flush_data_page | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+ fill_page(mali_page_fault_flush_page_directory_mapping, mali_page_fault_flush_page_table | MALI_MMU_FLAGS_PRESENT);
+ MALI_SUCCESS;
+ }
+ mali_mmu_release_table_page(mali_page_fault_flush_page_table);
+ mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+ mali_page_fault_flush_page_table_mapping = NULL;
+ }
+ mali_mmu_release_table_page(mali_page_fault_flush_data_page);
+ mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+ mali_page_fault_flush_data_page_mapping = NULL;
+ }
+ MALI_ERROR(err);
+}
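+
+/*
+ * The three fault-flush pages allocated above form a complete one-page
+ * "address space": every directory entry points at the single flush page
+ * table, and every table entry points (read/write) at the single flush
+ * data page, so any Mali virtual address resolves to the same 4kB scratch
+ * page. Presumably this lets a faulting core drain its outstanding
+ * accesses harmlessly while the page fault is being handled.
+ */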
+
+static void mali_free_fault_flush_pages(void)
+{
+ if (MALI_INVALID_PAGE != mali_page_fault_flush_page_directory)
+ {
+ mali_mmu_release_table_page(mali_page_fault_flush_page_directory);
+ mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+ }
+
+ if (MALI_INVALID_PAGE != mali_page_fault_flush_page_table)
+ {
+ mali_mmu_release_table_page(mali_page_fault_flush_page_table);
+ mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+ }
+
+ if (MALI_INVALID_PAGE != mali_page_fault_flush_data_page)
+ {
+ mali_mmu_release_table_page(mali_page_fault_flush_data_page);
+ mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+ }
+}
+
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+
+ /* Report the allocators */
+ mali_allocation_engine_report_allocators( physical_memory_allocators );
+
+ /* allocate the helper pages */
+ MALI_CHECK_NO_ERROR( mali_allocate_empty_page_directory() );
+ if (_MALI_OSK_ERR_OK != mali_allocate_fault_flush_pages())
+ {
+ mali_free_empty_page_directory();
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* activate the empty page directory on all MMUs */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
+ {
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+ }
+
+ MALI_DEBUG_PRINT(4, ("MMUs activated\n"));
+ /* the MMU system is now active */
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info)
+{
+ _mali_mem_info * mem_info;
+
+ /* make sure we won't leak any memory. It could also be that it's an uninitialized variable, but that would be a bug in the caller */
+ MALI_DEBUG_ASSERT(NULL == info->mem_info);
+
+ info->has_mmu = 1;
+
+ mem_info = _mali_osk_calloc(1,sizeof(_mali_mem_info));
+ MALI_CHECK_NON_NULL( mem_info, _MALI_OSK_ERR_NOMEM );
+
+ mem_info->size = 2048UL * 1024UL * 1024UL;
+ mem_info->maximum_order_supported = 30;
+ mem_info->flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE;
+ mem_info->identifier = 0;
+
+ info->mem_info = mem_info;
+
+ /* all OK */
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource)
+{
+ mali_kernel_memory_mmu * mmu;
+
+ MALI_DEBUG_PRINT(4, ("MMU '%s' @ (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1
+ ));
+
+ if (NULL != mali_memory_core_mmu_lookup(resource->mmu_id))
+ {
+ MALI_DEBUG_PRINT(1, ("Duplicate MMU ids found. The id %d is already in use\n", resource->mmu_id));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, MALI_MMU_REGISTERS_SIZE, resource->description))
+ {
+ /* specified addresses are already in use by another driver or the kernel */
+ MALI_DEBUG_PRINT(
+ 1, ("Failed to request MMU '%s' register address space at (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1
+ ));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mmu = _mali_osk_calloc(1, sizeof(mali_kernel_memory_mmu));
+
+ if (NULL == mmu)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate memory for handling a MMU unit"));
+ _mali_osk_mem_unreqregion(resource->base, MALI_MMU_REGISTERS_SIZE);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* basic setup */
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->list);
+
+ mmu->id = resource->mmu_id;
+ mmu->irq_nr = resource->irq;
+ mmu->flags = resource->flags;
+ mmu->base = resource->base;
+ mmu->mapping_size = MALI_MMU_REGISTERS_SIZE;
+ mmu->description = resource->description; /* no need to copy */
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->callbacks);
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->session_link);
+ mmu->in_page_fault_handler = 0;
+
+ mmu->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 127-mmu->id);
+ if (NULL == mmu->lock)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create mmu lock\n"));
+ _mali_osk_mem_unreqregion(mmu->base, mmu->mapping_size);
+ _mali_osk_free(mmu);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* map the registers */
+ mmu->mapped_registers = _mali_osk_mem_mapioregion( mmu->base, mmu->mapping_size, mmu->description );
+ if (NULL == mmu->mapped_registers)
+ {
+ /* failed to map the registers */
+ MALI_DEBUG_PRINT(1, ("Failed to map MMU registers at 0x%08X\n", mmu->base));
+ _mali_osk_lock_term(mmu->lock);
+ _mali_osk_mem_unreqregion(mmu->base, MALI_MMU_REGISTERS_SIZE);
+ _mali_osk_free(mmu);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_DEBUG_PRINT(4, ("MMU '%s' @ (0x%08X - 0x%08X) mapped to 0x%08X\n",
+ resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1, mmu->mapped_registers
+ ));
+
+ /* setup MMU interrupt mask */
+ /* set all values to known defaults */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ /* setup MMU page directory pointer */
+ /* The mali_page_directory pointer is guaranteed to be 4kb aligned because we've used get_zeroed_page to acquire it */
+ /* convert the kernel virtual address into a physical address and set */
+
+ /* add to our list of MMU's */
+ _mali_osk_list_addtail(&mmu->list, &mmu_head);
+
+ mmu->irq = _mali_osk_irq_init(
+ mmu->irq_nr,
+ mali_kernel_memory_mmu_interrupt_handler_upper_half,
+ mali_kernel_memory_mmu_interrupt_handler_bottom_half,
+ (_mali_osk_irq_trigger_t)mali_mmu_probe_irq_trigger,
+ (_mali_osk_irq_ack_t)mali_mmu_probe_irq_acknowledge,
+ mmu,
+ "mali_mmu_irq_handlers"
+ );
+ if (NULL == mmu->irq)
+ {
+ _mali_osk_list_del(&mmu->list);
+ _mali_osk_lock_term(mmu->lock);
+ _mali_osk_mem_unmapioregion( mmu->base, mmu->mapping_size, mmu->mapped_registers );
+ _mali_osk_mem_unreqregion(resource->base, MALI_MMU_REGISTERS_SIZE);
+ _mali_osk_free(mmu);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* set to a known state */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+
+ MALI_DEBUG_PRINT(2, ("MMU registered\n"));
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource)
+{
+ mali_io_address mapping;
+
+ MALI_DEBUG_PRINT(5, ("FPGA framework '%s' @ (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + sizeof(u32) * 2 - 1
+ ));
+
+ mapping = _mali_osk_mem_mapioregion(resource->base + 0x1000, sizeof(u32) * 2, "fpga framework");
+ if (mapping)
+ {
+ MALI_DEBUG_CODE(u32 data = )
+ _mali_osk_mem_ioread32(mapping, 0);
+ MALI_DEBUG_PRINT(2, ("FPGA framwork '%s' @ 0x%08X:\n", resource->description, resource->base));
+ MALI_DEBUG_PRINT(2, ("\tBitfile date: %d%02d%02d_%02d%02d\n",
+ (data >> 20),
+ (data >> 16) & 0xF,
+ (data >> 11) & 0x1F,
+ (data >> 6) & 0x1F,
+ (data >> 0) & 0x3F));
+ MALI_DEBUG_CODE(data = )
+ _mali_osk_mem_ioread32(mapping, sizeof(u32));
+ MALI_DEBUG_PRINT(2, ("\tBitfile SCCS rev: %d\n", data));
+
+ _mali_osk_mem_unmapioregion(resource->base + 0x1000, sizeof(u32) * 2, mapping);
+ }
+ else MALI_DEBUG_PRINT(1, ("Failed to access FPGA framework '%s' @ 0x%08X\n", resource->description, resource->base));
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource)
+{
+ mali_physical_memory_allocator * allocator;
+ mali_physical_memory_allocator ** next_allocator_list;
+
+ u32 alloc_order = resource->alloc_order;
+
+ allocator = mali_os_allocator_create(resource->size, resource->cpu_usage_adjust, resource->description);
+ if (NULL == allocator)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create OS memory allocator\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ allocator->alloc_order = alloc_order;
+
+ /* link in the allocator: insertion into an ordered list;
+ * allocators with the same alloc_order are kept in last-in, first-out order */
+ next_allocator_list = &physical_memory_allocators;
+
+ while ( NULL != *next_allocator_list &&
+ (*next_allocator_list)->alloc_order < alloc_order )
+ {
+ next_allocator_list = &((*next_allocator_list)->next);
+ }
+
+ allocator->next = (*next_allocator_list);
+ (*next_allocator_list) = allocator;
+
+ MALI_SUCCESS;
+}
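+
+/* Worked example of the ordered insert above: starting from an empty list,
+ * adding allocators with alloc_order 1, 0, 1 (in that order) yields
+ *
+ *   order 0 -> order 1 (added last) -> order 1 (added first)
+ *
+ * i.e. lower alloc_order allocators are tried first, and allocators with
+ * equal order are tried newest-first.
+ */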
+
+static _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource)
+{
+ mali_physical_memory_allocator * allocator;
+ mali_physical_memory_allocator ** next_allocator_list;
+ dedicated_memory_info * cleanup_data;
+
+ u32 alloc_order = resource->alloc_order;
+
+ /* do the lowlevel linux operation first */
+
+ /* Request ownership of the memory */
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, resource->size, resource->description))
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to request memory region %s (0x%08X - 0x%08X)\n", resource->description, resource->base, resource->base + resource->size - 1));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* create generic block allocator object to handle it */
+ allocator = mali_block_allocator_create(resource->base, resource->cpu_usage_adjust, resource->size, resource->description );
+
+ if (NULL == allocator)
+ {
+ MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+ _mali_osk_mem_unreqregion(resource->base, resource->size);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* save lowlevel cleanup info */
+ allocator->alloc_order = alloc_order;
+
+ cleanup_data = _mali_osk_malloc(sizeof(dedicated_memory_info));
+
+ if (NULL == cleanup_data)
+ {
+ _mali_osk_mem_unreqregion(resource->base, resource->size);
+ allocator->destroy(allocator);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ cleanup_data->base = resource->base;
+ cleanup_data->size = resource->size;
+
+ cleanup_data->next = mem_region_registrations;
+ mem_region_registrations = cleanup_data;
+
+ /* link in the allocator: insertion into an ordered list;
+ * allocators with the same alloc_order are kept in last-in, first-out order */
+ next_allocator_list = &physical_memory_allocators;
+
+ while ( NULL != *next_allocator_list &&
+ (*next_allocator_list)->alloc_order < alloc_order )
+ {
+ next_allocator_list = &((*next_allocator_list)->next);
+ }
+
+ allocator->next = (*next_allocator_list);
+ (*next_allocator_list) = allocator;
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_kernel_memory_mmu_interrupt_handler_upper_half(void * data)
+{
+ mali_kernel_memory_mmu * mmu;
+ u32 int_stat;
+
+ if (mali_benchmark) MALI_SUCCESS;
+
+ mmu = (mali_kernel_memory_mmu *)data;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ /* check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
+ int_stat = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_STATUS);
+ if (0 == int_stat)
+ {
+ MALI_DEBUG_PRINT(5, ("Ignoring shared interrupt\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT); /* no bits set, we are sharing the IRQ line and someone else caused the interrupt */
+ }
+
+ MALI_DEBUG_PRINT(1, ("mali_kernel_memory_mmu_interrupt_handler_upper_half\n"));
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, 0);
+
+ mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS);
+
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+ {
+ MALI_PRINT(("Page fault on %s\n", mmu->description));
+
+ _mali_osk_irq_schedulework(mmu->irq);
+ }
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+ {
+ MALI_PRINT(("Bus read error on %s\n", mmu->description));
+ /* clear interrupt flag */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ /* reenable it */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_MASK) | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ }
+
+ MALI_SUCCESS;
+}
+
+
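+/* Page fault recovery, as performed below (rough outline):
+ *  1. ask all cores to stop their bus activity (MMU_KILL_STEP1)
+ *  2. on Mali-200: switch to the special page fault flush page directory and
+ *     resume the MMU, so requests queued in the replay buffer drain into the
+ *     scratch data page
+ *  3. reset the cores and abort their jobs (MMU_KILL_STEP2)
+ *  4. soft-reset the MMU and re-enable paging on the empty page directory
+ *  5. resume normal job handling (MMU_KILL_STEP3)
+ */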
+static void mali_kernel_mmu_bus_reset(mali_kernel_memory_mmu * mmu)
+{
+
+#if defined(USING_MALI200)
+ int i;
+ const int replay_buffer_check_interval = 10; /* must be below 1000 */
+ const int replay_buffer_max_number_of_checks = 100;
+#endif
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ /* add an extra reference while handling the page fault */
+ mmu->usage_count++;
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_DEBUG_PRINT(4, ("Sending stop bus request to cores\n"));
+ /* request to stop the bus, but don't wait for it to actually stop */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES, (u32)mmu);
+
+#if defined(USING_MALI200)
+ /* no new requests will come from any of the connected cores from now on;
+ * we must now flush the replay buffer for any requests already queued
+ */
+ MALI_DEBUG_PRINT(4, ("Switching to the special page fault flush page directory\n"));
+ /* don't use the mali_mmu_activate_address_space function here as we can't stall the MMU */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_page_fault_flush_page_directory);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ /* resume the MMU */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+ /* the MMU will now play back all the requests, all going to our special page fault flush data page */
+
+ /* just to be safe, check that the replay buffer is empty before continuing */
+ if (!mali_benchmark) {
+ for (i = 0; i < replay_buffer_max_number_of_checks; i++)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY) break;
+ _mali_osk_time_ubusydelay(replay_buffer_check_interval);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, i == replay_buffer_max_number_of_checks, ("MMU: %s: Failed to flush replay buffer on page fault\n", mmu->description));
+ MALI_DEBUG_PRINT(1, ("Replay playback took %ld usec\n", i * replay_buffer_check_interval));
+ }
+#endif
+ /* notify all subsystems that the core should be reset once the bus is actually stopped */
+ MALI_DEBUG_PRINT(4,("Sending job abort command to subsystems\n"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS, (u32)mmu);
+
+ /* reprogram the MMU */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory); /* no session is active, so just activate the empty page directory */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+ /* release the extra address space reference, will schedule */
+ mali_memory_core_mmu_release_address_space_reference(mmu);
+
+ /* resume normal operation */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP3_CONTINUE_JOB_HANDLING, (u32)mmu);
+ MALI_DEBUG_PRINT(4, ("Page fault handling complete\n"));
+}
+
+void mali_kernel_mmu_reset(void * input_mmu)
+{
+ mali_kernel_memory_mmu * mmu;
+ MALI_DEBUG_ASSERT_POINTER(input_mmu);
+ mmu = (mali_kernel_memory_mmu *)input_mmu;
+
+ MALI_DEBUG_PRINT(4, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->description));
+
+ if ( 0 != mmu->in_page_fault_handler)
+ {
+ /* This is possible if the bus can never be stopped for some reason */
+ MALI_PRINT_ERROR(("Stopping the Memory bus not possible. Mali reset could not be performed."));
+ return;
+ }
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory); /* no session is active, so just activate the empty page directory */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+}
+
+void mali_kernel_mmu_force_bus_reset(void * input_mmu)
+{
+ mali_kernel_memory_mmu * mmu;
+ MALI_DEBUG_ASSERT_POINTER(input_mmu);
+ mmu = (mali_kernel_memory_mmu *)input_mmu;
+ if ( 0 != mmu->in_page_fault_handler)
+ {
+ /* This is possible if the bus can never be stopped for some reason */
+ MALI_PRINT_ERROR(("Stopping the Memory bus not possible. Mali reset could not be performed."));
+ return;
+ }
+ MALI_DEBUG_PRINT(1, ("Mali MMU: Force_bus_reset.\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, 0);
+ mali_kernel_mmu_bus_reset(mmu);
+}
+
+
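+/* Bottom half of the page fault interrupt: runs from the work queue with all
+ * subsystems locked (MMU_KILL_STEP0). It verifies that a fault is actually
+ * pending, dumps the offending PDE/PTE for debugging, performs the bus reset
+ * sequence above, and finally unlocks the subsystems (MMU_KILL_STEP4).
+ */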
+static void mali_kernel_memory_mmu_interrupt_handler_bottom_half(void * data)
+{
+ mali_kernel_memory_mmu * mmu;
+ u32 raw, fault_address, status;
+
+ if (NULL == data)
+ {
+ MALI_PRINT_ERROR(("MMU IRQ work queue: NULL argument"));
+ return; /* Error */
+ }
+ mmu = (mali_kernel_memory_mmu*)data;
+
+
+ MALI_DEBUG_PRINT(4, ("Locking subsystems\n"));
+ /* lock all subsystems */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP0_LOCK_SUBSYSTEM, (u32)mmu);
+
+ raw = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_RAWSTAT);
+ status = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS);
+
+ if ( (0==(raw & MALI_MMU_INTERRUPT_PAGE_FAULT)) && (0==(status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)) )
+ {
+ MALI_DEBUG_PRINT(1, ("MMU: Page fault bottom half: No Irq found.\n"));
+ MALI_DEBUG_PRINT(4, ("Unlocking subsystems"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP4_UNLOCK_SUBSYSTEM, (u32)mmu);
+ return;
+ }
+
+ mmu->in_page_fault_handler = 1;
+
+ fault_address = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+ MALI_PRINT(("Page fault detected at 0x%x from bus id %d of type %s on %s\n",
+ (void*)fault_address,
+ (status >> 6) & 0x1F,
+ (status & 32) ? "write" : "read",
+ mmu->description)
+ );
+
+ if (NULL == mmu->active_session)
+ {
+ MALI_PRINT(("Spurious memory access detected from MMU %s\n", mmu->description));
+ }
+ else
+ {
+ MALI_PRINT(("Active page directory at 0x%08X\n", mmu->active_session->page_directory));
+ MALI_PRINT(("Info from page table for VA 0x%x:\n", (void*)fault_address));
+ MALI_PRINT(("DTE entry: PTE at 0x%x marked as %s\n",
+ (void*)(_mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped,
+ MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK),
+ _mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped,
+ MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT ? "present" : "not present"
+ ));
+
+ if (_mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped, MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)
+ {
+ mali_io_address pte;
+ u32 data;
+ pte = mmu->active_session->page_entries_mapped[MALI_MMU_PDE_ENTRY(fault_address)];
+ data = _mali_osk_mem_ioread32(pte, MALI_MMU_PTE_ENTRY(fault_address) * sizeof(u32));
+ MALI_PRINT(("PTE entry: Page at 0x%x, %s %s %s\n",
+ (void*)(data & ~MALI_MMU_FLAGS_MASK),
+ data & MALI_MMU_FLAGS_PRESENT ? "present" : "not present",
+ data & MALI_MMU_FLAGS_READ_PERMISSION ? "readable" : "",
+ data & MALI_MMU_FLAGS_WRITE_PERMISSION ? "writable" : ""
+ ));
+ }
+ else
+ {
+ MALI_PRINT(("PTE entry: Not present\n"));
+ }
+ }
+
+
+ mali_kernel_mmu_bus_reset(mmu);
+
+ mmu->in_page_fault_handler = 0;
+
+ /* unlock all subsystems */
+ MALI_DEBUG_PRINT(4, ("Unlocking subsystems"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP4_UNLOCK_SUBSYSTEM, (u32)mmu);
+
+}
+
+
+static u32 mali_mmu_register_read(mali_kernel_memory_mmu * unit, mali_mmu_register reg)
+{
+ u32 val;
+
+ if (mali_benchmark) return 0;
+
+ val = _mali_osk_mem_ioread32(unit->mapped_registers, (u32)reg * sizeof(u32));
+
+ MALI_DEBUG_PRINT(6, ("mali_mmu_register_read addr:0x%04X val:0x%08x\n", (u32)reg * sizeof(u32),val));
+
+ return val;
+}
+
+static void mali_mmu_register_write(mali_kernel_memory_mmu * unit, mali_mmu_register reg, u32 val)
+{
+ if (mali_benchmark) return;
+
+ MALI_DEBUG_PRINT(6, ("mali_mmu_register_write addr:0x%04X val:0x%08x\n", (u32)reg * sizeof(u32), val));
+
+ _mali_osk_mem_iowrite32(unit->mapped_registers, (u32)reg * sizeof(u32), val);
+}
+
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ ump_dd_handle ump_mem;
+ u32 nr_blocks;
+ u32 i;
+ ump_dd_physical_block * ump_blocks;
+ ump_mem_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof( ump_mem_allocation ) );
+ if ( NULL==ret_allocation ) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ump_mem = (ump_dd_handle)ctx;
+
+ MALI_DEBUG_PRINT(4, ("In ump_memory_commit\n"));
+
+ nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+
+ MALI_DEBUG_PRINT(4, ("Have %d blocks\n", nr_blocks));
+
+ if (nr_blocks == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("No block count\n"));
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks)*nr_blocks );
+ if ( NULL==ump_blocks )
+ {
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks))
+ {
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ /* Store away the initial offset for unmapping purposes */
+ ret_allocation->initial_offset = *offset;
+
+ for(i=0; i<nr_blocks; ++i)
+ {
+ MALI_DEBUG_PRINT(4, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[i].addr , 0, ump_blocks[i].size ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += ump_blocks[i].size;
+ }
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[0].addr , 0, _MALI_OSK_MALI_PAGE_SIZE ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ _mali_osk_free( ump_blocks );
+
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->ump_mem = ump_mem;
+ ret_allocation->size_allocated = *offset - ret_allocation->initial_offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = ump_memory_release;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+static void ump_memory_release(void * ctx, void * handle)
+{
+ ump_dd_handle ump_mem;
+ ump_mem_allocation *allocation;
+
+ allocation = (ump_mem_allocation *)handle;
+
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ ump_mem = allocation->ump_mem;
+
+ MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID!=ump_mem);
+
+ /* At present, this is a no-op, but it allows the mali_address_manager to
+ * do unmapping of a subrange in the future. */
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size_allocated,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+ _mali_osk_free( allocation );
+
+
+ ump_dd_reference_release(ump_mem) ;
+ return;
+}
+
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args )
+{
+ ump_dd_handle ump_mem;
+ mali_physical_memory_allocator external_memory_allocator;
+ memory_session * session_data;
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+ args->secure_id, args->mali_address, args->size));
+
+ ump_mem = ump_dd_handle_create_from_secure_id( (int)args->secure_id ) ;
+
+ if ( UMP_DD_HANDLE_INVALID==ump_mem ) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor)
+ {
+ ump_dd_reference_release(ump_mem);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ external_memory_allocator.allocate = ump_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = ump_mem;
+ external_memory_allocator.name = "UMP Memory";
+ external_memory_allocator.next = NULL;
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from UMP attach\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+
+ MALI_SUCCESS;
+
+}
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0 */
+
+
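+/* For external memory, 'ctx' is the caller's u32[2] scratch array: ctx[0]
+ * holds the physical base address and ctx[1] the size, as filled in by
+ * _mali_ukk_map_external_mem() below.
+ */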
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ u32 * data;
+ external_mem_allocation * ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof(external_mem_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ data = (u32*)ctx;
+
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->initial_offset = *offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = external_memory_release;
+
+ MALI_DEBUG_PRINT(3, ("External map: mapping phys 0x%08X at mali virtual address 0x%08X staring at offset 0x%08X length 0x%08X\n", data[0], descriptor->mali_address, *offset, data[1]));
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, data[1]))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += data[1];
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, _MALI_OSK_MALI_PAGE_SIZE))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap what we previously mapped */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ ret_allocation->size = *offset - ret_allocation->initial_offset;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+static void external_memory_release(void * ctx, void * handle)
+{
+ external_mem_allocation * allocation;
+
+ allocation = (external_mem_allocation *) handle;
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ /* At present, this is a no-op, but it allows the mali_address_manager to
+ * do unmapping of a subrange in the future. */
+
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+
+ _mali_osk_free( allocation );
+
+ return;
+}
+
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+ mali_physical_memory_allocator external_memory_allocator;
+ memory_session * session_data;
+ u32 info[2];
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ external_memory_allocator.allocate = external_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = &info[0];
+ external_memory_allocator.name = "External Memory";
+ external_memory_allocator.next = NULL;
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+ (void*)args->phys_addr,
+ (void*)(args->phys_addr + args->size -1),
+ (void*)args->mali_address)
+ );
+
+ /* Validate the mali physical range */
+ MALI_CHECK_NO_ERROR( mali_kernel_core_validate_mali_phys_range( args->phys_addr, args->size ) );
+
+ info[0] = args->phys_addr;
+ info[1] = args->size;
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->lock = NULL;
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from range_map_external_memory\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ args->memory_size = 2 * 1024 * 1024 * 1024UL; /* 2GB address space */
+ args->mali_address_base = 1 * 1024 * 1024 * 1024UL; /* starting at 1GB, giving this layout: (0-1GB unused)(1GB-3GB used by Mali)(3GB-4GB unused) */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_mmu_page_table_cache_create(void)
+{
+ page_table_cache.lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 110);
+ MALI_CHECK_NON_NULL( page_table_cache.lock, _MALI_OSK_ERR_FAULT );
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.partial);
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.full);
+ MALI_SUCCESS;
+}
+
+void mali_mmu_page_table_cache_destroy(void)
+{
+ mali_mmu_page_table_allocation * alloc, *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT_IF(1, 0 != alloc->usage_count, ("Destroying page table cache while pages are tagged as in use. %d allocations still marked as in use.\n", alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, 0 == _mali_osk_list_empty(&page_table_cache.full), ("Page table cache full list contains one or more elements\n"));
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT(1, ("Destroy alloc 0x%08X with usage count %d\n", (u32)alloc, alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+}
+
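+/* The page table cache hands out single 4kB pages carved from larger
+ * allocations. 'partial' holds allocations with at least one free page and
+ * 'full' holds completely used ones; 'usage_map' is a bitmap with one bit
+ * per page, and 'usage_count' tracks how many pages are handed out.
+ */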
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+{
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == _mali_osk_list_empty(&page_table_cache.partial))
+ {
+ mali_mmu_page_table_allocation * alloc = _MALI_OSK_LIST_ENTRY(page_table_cache.partial.next, mali_mmu_page_table_allocation, list);
+ int page_number = _mali_osk_find_first_zero_bit(alloc->usage_map, alloc->num_pages);
+ MALI_DEBUG_PRINT(6, ("Partial page table allocation found, using page offset %d\n", page_number));
+ _mali_osk_set_nonatomic_bit(page_number, alloc->usage_map);
+ alloc->usage_count++;
+ if (alloc->num_pages == alloc->usage_count)
+ {
+ /* full, move alloc to full list*/
+ _mali_osk_list_move(&alloc->list, &page_table_cache.full);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ *table_page = (MALI_MMU_PAGE_SIZE * page_number) + alloc->pages.phys_base;
+ *mapping = (mali_io_address)((MALI_MMU_PAGE_SIZE * page_number) + (u32)alloc->pages.mapping);
+ MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+ else
+ {
+ mali_mmu_page_table_allocation * alloc;
+ /* no free pages, allocate a new one */
+
+ alloc = (mali_mmu_page_table_allocation *)_mali_osk_calloc(1, sizeof(mali_mmu_page_table_allocation));
+ if (NULL == alloc)
+ {
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&alloc->list);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_page_tables(memory_engine, &alloc->pages, physical_memory_allocators))
+ {
+ MALI_DEBUG_PRINT(1, ("No more memory for page tables\n"));
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* create the usage map */
+ alloc->num_pages = alloc->pages.size / MALI_MMU_PAGE_SIZE;
+ alloc->usage_count = 1;
+ MALI_DEBUG_PRINT(3, ("New page table cache expansion, %d pages in new cache allocation\n", alloc->num_pages));
+ alloc->usage_map = _mali_osk_calloc(1, ((alloc->num_pages + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(unsigned long)); /* one bit per page, rounded up to a whole number of longs */
+ if (NULL == alloc->usage_map)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate memory to describe MMU page table cache usage\n"));
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* clear memory allocation */
+ fill_page(alloc->pages.mapping, 0);
+
+ _mali_osk_set_nonatomic_bit(0, alloc->usage_map);
+
+ _mali_osk_list_add(&alloc->list, &page_table_cache.partial);
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = alloc->pages.phys_base; /* return the first page */
+ *mapping = alloc->pages.mapping; /* Mapping for first page */
+ MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+}
+
+void mali_mmu_release_table_page(u32 pa)
+{
+ mali_mmu_page_table_allocation * alloc, * temp_alloc;
+
+ MALI_DEBUG_PRINT_IF(1, pa & 4095, ("Bad page address 0x%x given to mali_mmu_release_table_page\n", (void*)pa));
+
+ MALI_DEBUG_PRINT(4, ("Releasing table page 0x%08X to the cache\n", pa));
+
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* find the entry this address belongs to */
+ /* first check the partial list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ MALI_DEBUG_ASSERT(0 != _mali_osk_test_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map));
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+ if (0 == alloc->usage_count)
+ {
+ /* empty, release whole page alloc */
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(partial list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+ /* then check the full list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+ /* transfer to partial list */
+ _mali_osk_list_move(&alloc->list, &page_table_cache.partial);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(full list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+ MALI_DEBUG_PRINT(1, ("pa 0x%x not found in the page table cache\n", (void*)pa));
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+void* mali_memory_core_mmu_lookup(u32 id)
+{
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+
+ /* find an MMU with a matching id */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
+ {
+ if (id == mmu->id) return mmu;
+ }
+
+ /* not found */
+ return NULL;
+}
+
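+/* Swap the active page directory on an MMU: request a stall and poll for it
+ * to take effect (up to max_loop_count * delay_in_usecs), write the new
+ * directory address, zap the TLB, then release the stall.
+ */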
+void mali_mmu_activate_address_space(mali_kernel_memory_mmu * mmu, u32 page_directory)
+{
+ const int delay_in_usecs = 10;
+ const int max_loop_count = 10;
+ int i;
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+ if (!mali_benchmark) {
+ for (i = 0; i < max_loop_count; ++i)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_STALL_ACTIVE) break;
+ _mali_osk_time_ubusydelay(delay_in_usecs);
+ }
+ MALI_DEBUG_PRINT_IF(1, (max_loop_count == i), ("Stall request failed, swapping anyway\n"));
+ }
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+}
+
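+/* Three possible outcomes below: the MMU is idle and the requested session is
+ * activated (_MALI_OSK_ERR_OK); the same session is already active and allowed
+ * to work in parallel, so only the reference count is bumped (_MALI_OSK_ERR_OK);
+ * or the MMU is busy with another session, in which case the optional callback
+ * is registered for idle notification and _MALI_OSK_ERR_BUSY is returned
+ * (_MALI_OSK_ERR_FAULT if no callback was supplied).
+ */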
+_mali_osk_errcode_t mali_memory_core_mmu_activate_page_table(void* mmu_ptr, struct mali_session_data * mali_session_data, void(*callback)(void*), void * callback_argument)
+{
+ memory_session * requested_memory_session;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ mali_kernel_memory_mmu * mmu;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+ MALI_DEBUG_ASSERT_POINTER(mali_session_data);
+
+ mmu = (mali_kernel_memory_mmu *)mmu_ptr;
+
+ MALI_DEBUG_PRINT(4, ("Asked to activate page table for session 0x%x on MMU %s\n", mali_session_data, mmu->description));
+ requested_memory_session = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+ MALI_DEBUG_PRINT(5, ("Session 0x%x looked up as using memory session 0x%x\n", mali_session_data, requested_memory_session));
+
+ MALI_DEBUG_ASSERT_POINTER(requested_memory_session);
+
+ MALI_DEBUG_PRINT(7, ("Taking locks\n"));
+
+ _mali_osk_lock_wait(requested_memory_session->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ if (0 == mmu->usage_count)
+ {
+ /* no session currently active, activate the requested session */
+ MALI_DEBUG_ASSERT(NULL == mmu->active_session);
+ mmu->active_session = requested_memory_session;
+ mmu->usage_count = 1;
+ MALI_DEBUG_PRINT(4, ("MMU idle, activating page directory 0x%08X on MMU %s\n", requested_memory_session->page_directory, mmu->description));
+ mali_mmu_activate_address_space(mmu, requested_memory_session->page_directory);
+ {
+ /* Insert mmu into the right place in the active_mmus list so that
+ * it is still sorted. The list must be sorted by ID so we can get
+ * the mutexes in the right order in
+ * _mali_ukk_mem_munmap_internal().
+ */
+ _mali_osk_list_t *entry;
+ for (entry = requested_memory_session->active_mmus.next;
+ entry != &requested_memory_session->active_mmus;
+ entry = entry->next)
+ {
+ mali_kernel_memory_mmu *temp = _MALI_OSK_LIST_ENTRY(entry, mali_kernel_memory_mmu, session_link);
+ if (mmu->id < temp->id)
+ break;
+ }
+ /* If we broke out, then 'entry' points to the list node of the
+ * first mmu with a greater ID; otherwise, it points to
+ * active_mmus. We want to add *before* this node.
+ */
+ _mali_osk_list_addtail(&mmu->session_link, entry);
+ }
+ err = _MALI_OSK_ERR_OK;
+ }
+
+ /* Allow two cores to run in parallel if they come from the same session */
+ else if (
+ (mmu->in_page_fault_handler == 0) &&
+ (requested_memory_session == mmu->active_session ) &&
+ (0==(MALI_MMU_DISALLOW_PARALLELL_WORK_OF_MALI_CORES & mmu->flags))
+ )
+ {
+ /* nested activation detected, just update the reference count */
+ MALI_DEBUG_PRINT(4, ("Nested activation detected, %d previous activations found\n", mmu->usage_count));
+ mmu->usage_count++;
+ err = _MALI_OSK_ERR_OK;
+ }
+
+ else if (NULL != callback)
+ {
+ /* can't activate right now, notify caller on idle via callback */
+ mali_kernel_memory_mmu_idle_callback * callback_object, * temp_callback_object;
+ int found = 0;
+
+ MALI_DEBUG_PRINT(3, ("The MMU is busy and is using a different address space, callback given\n"));
+ /* check for existing registration */
+ _MALI_OSK_LIST_FOREACHENTRY(callback_object, temp_callback_object, &mmu->callbacks, mali_kernel_memory_mmu_idle_callback, link)
+ {
+ if (callback_object->callback == callback)
+ {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ {
+ MALI_DEBUG_PRINT(5, ("Duplicate callback registration found, ignoring\n"));
+ /* callback already registered */
+ err = _MALI_OSK_ERR_BUSY;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(5,("New callback, registering\n"));
+ /* register the new callback */
+ callback_object = _mali_osk_malloc(sizeof(mali_kernel_memory_mmu_idle_callback));
+ if (NULL != callback_object)
+ {
+ MALI_DEBUG_PRINT(7,("Callback struct setup\n"));
+ callback_object->callback = callback;
+ callback_object->callback_argument = callback_argument;
+ _mali_osk_list_addtail(&callback_object->link, &mmu->callbacks);
+ err = _MALI_OSK_ERR_BUSY;
+ }
+ }
+ }
+
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_lock_signal(requested_memory_session->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_ERROR(err);
+}
+
+void mali_memory_core_mmu_release_address_space_reference(void* mmu_ptr)
+{
+ mali_kernel_memory_mmu_idle_callback * callback_object, * temp;
+ mali_kernel_memory_mmu * mmu;
+ memory_session * session;
+
+ _MALI_OSK_LIST_HEAD(callbacks);
+
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+ mmu = (mali_kernel_memory_mmu *)mmu_ptr;
+
+ session = mmu->active_session;
+
+ /* the session may be NULL if we are handling a spurious page fault */
+ if (NULL != session)
+ {
+ _mali_osk_lock_wait(session->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("Deactivation of address space on MMU %s, %d references exists\n", mmu->description, mmu->usage_count));
+ MALI_DEBUG_ASSERT(0 != mmu->usage_count);
+ mmu->usage_count--;
+ if (0 != mmu->usage_count)
+ {
+ MALI_DEBUG_PRINT(4, ("MMU still in use by this address space, %d references still exists\n", mmu->usage_count));
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ /* the session may be NULL if we are handling a spurious page fault */
+ if (NULL != session)
+ {
+ _mali_osk_lock_signal(session->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+ return;
+ }
+
+ MALI_DEBUG_PRINT(4, ("Activating the empty page directory on %s\n", mmu->description));
+
+ /* last reference gone, deactivate current address space */
+ mali_mmu_activate_address_space(mmu, mali_empty_page_directory);
+
+ /* unlink from session */
+ _mali_osk_list_delinit(&mmu->session_link);
+ /* remove the active session pointer */
+ mmu->active_session = NULL;
+
+ /* Notify all registered callbacks.
+ * We have to be careful here:
+ * we must call the callbacks with the lock released and
+ * the callback list emptied to allow them to re-register.
+ * So we make a copy of the list, clear the original, and then call the callbacks on the local copy.
+ */
+ /* copy list */
+ _MALI_OSK_INIT_LIST_HEAD(&callbacks);
+ _mali_osk_list_splice(&mmu->callbacks, &callbacks);
+ /* clear the original, allowing new registrations during the callback */
+ _MALI_OSK_INIT_LIST_HEAD(&mmu->callbacks);
+
+ /* end of mmu manipulation, so safe to unlock */
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* then finally release the (possible) session lock; no session may have been active (spurious page fault handling) */
+ if (NULL != session)
+ {
+ _mali_osk_lock_signal(session->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(callback_object, temp, &callbacks, mali_kernel_memory_mmu_idle_callback, link)
+ {
+ MALI_DEBUG_ASSERT_POINTER(callback_object->callback);
+ (callback_object->callback)(callback_object->callback_argument);
+ _mali_osk_list_del(&callback_object->link);
+ _mali_osk_free(callback_object);
+ }
+}
+
+void mali_memory_core_mmu_unregister_callback(void* mmu_ptr, void(*callback)(void*))
+{
+ mali_kernel_memory_mmu_idle_callback * callback_object, * temp_callback_object;
+ mali_kernel_memory_mmu * mmu;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+ MALI_DEBUG_ASSERT_POINTER(callback);
+
+ mmu = (mali_kernel_memory_mmu *)mmu_ptr;
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ _MALI_OSK_LIST_FOREACHENTRY(callback_object, temp_callback_object, &mmu->callbacks, mali_kernel_memory_mmu_idle_callback, link)
+ {
+ MALI_DEBUG_ASSERT_POINTER(callback_object->callback);
+ if (callback_object->callback == callback)
+ {
+ _mali_osk_list_del(&callback_object->link);
+ _mali_osk_free(callback_object);
+ break;
+ }
+ }
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
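+/* Each PDE covers 4MB (0x400000 bytes) of Mali virtual address space, so a
+ * mapping spanning N PDEs needs up to N page tables. The per-PDE
+ * page_entries_usage_count lets several allocations share one page table;
+ * a table is only released when its count drops back to zero.
+ */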
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor)
+{
+ /* allocate page tables, if needed */
+ int i;
+ const int first_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address);
+ int last_pde_idx;
+ memory_session * session_data;
+#if defined USING_MALI400_L2_CACHE
+ int has_active_mmus = 0;
+ int page_dir_updated = 0;
+#endif
+
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ last_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address + _MALI_OSK_MALI_PAGE_SIZE + descriptor->size - 1);
+ }
+ else
+ {
+ last_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address + descriptor->size - 1);
+ }
+
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ MALI_DEBUG_PRINT(4, ("allocating page tables for Mali virtual address space 0x%08X to 0x%08X\n", descriptor->mali_address, descriptor->mali_address + descriptor->size - 1));
+
+#if defined USING_MALI400_L2_CACHE
+ if (0 == _mali_osk_list_empty(&session_data->active_mmus))
+ {
+ /*
+ * We have active MMUs, so we are probably in the process of allocating more memory for a suspended GP job (PLBU heap).
+ * From Mali-400 MP r1p0, MMU page directories/tables are also cached by the Mali L2 cache, so we need to invalidate the page directory
+ * in the L2 cache whenever we add new page directory entries (PDEs) to it.
+ * We only need to do this when an MMU is active, because otherwise the entire Mali L2 cache is invalidated at job start.
+ */
+ has_active_mmus = 1;
+ }
+#endif
+
+ for (i = first_pde_idx; i <= last_pde_idx; i++)
+ {
+ if ( 0 == (_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT) )
+ {
+ u32 pte_phys;
+ mali_io_address pte_mapped;
+ _mali_osk_errcode_t err;
+
+ /* allocate a new page table */
+ MALI_DEBUG_ASSERT(0 == session_data->page_entries_usage_count[i]);
+ MALI_DEBUG_ASSERT(NULL == session_data->page_entries_mapped[i]);
+
+ err = mali_mmu_get_table_page(&pte_phys, &pte_mapped);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ session_data->page_entries_mapped[i] = pte_mapped;
+ MALI_DEBUG_ASSERT_POINTER( session_data->page_entries_mapped[i] );
+
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), pte_phys | MALI_MMU_FLAGS_PRESENT); /* mark page table as present */
+
+ /* update usage count */
+ session_data->page_entries_usage_count[i]++;
+#if defined USING_MALI400_L2_CACHE
+ page_dir_updated = 1;
+#endif
+ continue; /* continue loop */
+ }
+
+ MALI_DEBUG_PRINT(1, ("Page table alloc failed\n"));
+ break; /* abort loop, failed to allocate one or more page tables */
+ }
+ else
+ {
+ session_data->page_entries_usage_count[i]++;
+ }
+ }
+
+ if (i <= last_pde_idx)
+ {
+ /* one or more pages could not be allocated, release reference count for the ones we added one for */
+ /* adjust for the one which caused the for loop to be aborted */
+ i--;
+
+ while (i >= first_pde_idx)
+ {
+ MALI_DEBUG_ASSERT(0 != session_data->page_entries_usage_count[i]);
+ session_data->page_entries_usage_count[i]--;
+ if (0 == session_data->page_entries_usage_count[i])
+ {
+ /* last reference removed */
+ mali_mmu_release_table_page(MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32))));
+ session_data->page_entries_mapped[i] = NULL;
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0); /* mark as not present in the page directory */
+ }
+ i--;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+#if defined USING_MALI400_L2_CACHE
+ if (1 == has_active_mmus && 1 == page_dir_updated)
+ {
+ /*
+ * We have updated the page directory and have an active MMU using it, so invalidate it in the Mali L2 cache.
+ */
+ mali_kernel_l2_cache_invalidate_page(session_data->page_directory);
+ }
+#endif
+
+ /* all OK */
+ MALI_SUCCESS;
+}
+
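+/* Tear-down mirrors the allocate path: drop one reference per PDE in the
+ * range, free the whole page table once its count reaches zero, and otherwise
+ * just zero out the PTEs covering this allocation's sub-range.
+ */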
+static void mali_address_manager_release(mali_memory_allocation * descriptor)
+{
+ int first_pde_idx;
+ int last_pde_idx;
+ memory_session * session_data;
+ u32 mali_address;
+ u32 mali_address_end;
+ u32 left;
+ int i;
+#if defined USING_MALI400_L2_CACHE
+ int has_active_mmus = 0;
+ int page_dir_updated = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_directory_mapped);
+
+ mali_address = descriptor->mali_address;
+ mali_address_end = descriptor->mali_address + descriptor->size;
+ left = descriptor->size;
+
+ first_pde_idx = MALI_MMU_PDE_ENTRY(mali_address);
+ last_pde_idx = MALI_MMU_PDE_ENTRY(mali_address_end - 1);
+
+ MALI_DEBUG_PRINT(3, ("Zapping Mali MMU table for address 0x%08X size 0x%08X\n", mali_address, left));
+ MALI_DEBUG_PRINT(4, ("Zapping PDE %d through %d\n", first_pde_idx, last_pde_idx));
+
+#if defined USING_MALI400_L2_CACHE
+ if (0 == _mali_osk_list_empty(&session_data->active_mmus))
+ {
+ /*
+ * From Mali-400 MP r1p0, MMU page directory/tables are also cached by the Mali L2 cache, thus we need to invalidate the page tables
+ * from the L2 cache to ensure that the memory is unmapped.
+ * We only need to do this when an MMU is active, because otherwise the entire Mali L2 cache is invalidated at job start.
+ */
+ has_active_mmus = 1;
+ }
+#endif
+
+
+ for (i = first_pde_idx; i <= last_pde_idx; i++)
+ {
+ const int size_inside_pte = left < 0x400000 ? left : 0x400000;
+
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped[i]);
+ MALI_DEBUG_ASSERT(0 != session_data->page_entries_usage_count[i]);
+ MALI_DEBUG_PRINT(4, ("PDE %d\n", i));
+
+ session_data->page_entries_usage_count[i]--;
+
+ if (0 == session_data->page_entries_usage_count[i])
+ {
+ MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+ /* last reference removed, no need to zero out each PTE */
+ mali_mmu_release_table_page(MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32))));
+ session_data->page_entries_mapped[i] = NULL;
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0); /* mark as not present in the page directory */
+#if defined USING_MALI400_L2_CACHE
+ page_dir_updated = 1;
+#endif
+ }
+ else
+ {
+ int j;
+ const int first_pte_idx = MALI_MMU_PTE_ENTRY(mali_address);
+ const int last_pte_idx = MALI_MMU_PTE_ENTRY(mali_address + size_inside_pte - 1);
+
+ MALI_DEBUG_PRINT(4, ("Partial page table fill detected, zapping entries %d through %d (page table at 0x%08X)\n", first_pte_idx, last_pte_idx, MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)))));
+
+ for (j = first_pte_idx; j <= last_pte_idx; j++)
+ {
+ _mali_osk_mem_iowrite32(session_data->page_entries_mapped[i], j * sizeof(u32), 0);
+ }
+
+ MALI_DEBUG_PRINT(5, ("zap complete\n"));
+
+ mali_address += size_inside_pte;
+
+#if defined USING_MALI400_L2_CACHE
+ if (1 == has_active_mmus)
+ {
+ /* Invalidate the page we've just modified */
+ mali_kernel_l2_cache_invalidate_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ }
+#endif
+ }
+ left -= size_inside_pte;
+ }
+
+#if defined USING_MALI400_L2_CACHE
+ if ((1 == page_dir_updated) && (1== has_active_mmus))
+ {
+ /* The page directory was also updated */
+ mali_kernel_l2_cache_invalidate_page(session_data->page_directory);
+ }
+#endif
+}
+
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size)
+{
+ memory_session * session_data;
+ u32 mali_address;
+ u32 mali_address_end;
+ u32 current_phys_addr;
+#if defined USING_MALI400_L2_CACHE
+ int has_active_mmus = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_ASSERT_POINTER( phys_addr );
+
+ current_phys_addr = *phys_addr;
+
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ mali_address = descriptor->mali_address + offset;
+ mali_address_end = descriptor->mali_address + offset + size;
+
+#if defined USING_MALI400_L2_CACHE
+ if (0 == _mali_osk_list_empty(&session_data->active_mmus))
+ {
+ /*
+ * We have active MMUs, so we are probably in the process of allocating more memory for a suspended GP job (PLBU heap).
+ * From Mali-400 MP r1p0, MMU page directories/tables are also cached by the Mali L2 cache, so we need to invalidate the page tables
+ * in the L2 cache when we have allocated more heap memory.
+ * We only need to do this when an MMU is active, because otherwise the entire Mali L2 cache is invalidated at job start.
+ */
+ has_active_mmus = 1;
+ }
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali map: mapping 0x%08X to Mali address 0x%08X length 0x%08X\n", current_phys_addr, mali_address, size));
+
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped);
+
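+ /* write one present, read/write PTE per 4 KiB page in the range */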
+ for ( ; mali_address < mali_address_end; mali_address += MALI_MMU_PAGE_SIZE, current_phys_addr += MALI_MMU_PAGE_SIZE)
+ {
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+ _mali_osk_mem_iowrite32(session_data->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)], MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32), current_phys_addr | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+ }
+
+#if defined USING_MALI400_L2_CACHE
+ if (1 == has_active_mmus)
+ {
+ int i;
+ /* the mapping loop above advanced mali_address past the range, so recompute the first PDE index from the start of the range */
+ const int first_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address + offset);
+ const int last_pde_idx = MALI_MMU_PDE_ENTRY(mali_address_end - 1);
+
+ /*
+ * Invalidate the updated page table(s), in case they have been used for something
+ * else since the last job start (when the entire Mali L2 cache was invalidated)
+ */
+ for (i = first_pde_idx; i <= last_pde_idx; i++)
+ {
+ mali_kernel_l2_cache_invalidate_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ }
+ }
+#endif
+
+ MALI_SUCCESS;
+}
+
+/* This handler is registered with mali_mmap for MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+ struct mali_session_data * mali_session_data;
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ /* validate input */
+ if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ /* Unpack arguments */
+ mali_session_data = (struct mali_session_data *)args->ctx;
+
+ if (NULL == mali_session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: mali_session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ MALI_DEBUG_ASSERT( mali_subsystem_memory_id >= 0 );
+
+ session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+ /* validate input */
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }
+
+ descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
+ if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }
+
+ descriptor->size = args->size;
+ descriptor->mali_address = args->phys_addr;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+
+ descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
+ descriptor->lock = session_data->lock;
+ _mali_osk_list_init( &descriptor->list );
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == mali_allocation_engine_allocate_memory(memory_engine, descriptor, physical_memory_allocators, &session_data->memory_head))
+ {
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
+ {
+ /* the session lock keeps the active_mmus list stable; still take each MMU's own lock around the register sequence */
+ MALI_DEBUG_PRINT(5, ("Zapping the cache of mmu %s as it's using the page table we have updated\n", mmu->description));
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
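+ /* stall the MMU, wait for the stall to take effect, zap its TLB, then let it resume with the updated page tables */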
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+ if (!mali_benchmark) {
+ while ( (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_STALL_ACTIVE) == 0) _mali_osk_time_ubusydelay(1);
+ }
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* All ok, write out any information generated from this call */
+ args->mapping = descriptor->mapping;
+ args->cookie = (u32)descriptor;
+
+ MALI_DEBUG_PRINT(7, ("MMAP OK\n"));
+ /* All done */
+ MALI_SUCCESS;
+ }
+ else
+ {
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ /* OOM, but not a fatal error */
+ MALI_DEBUG_PRINT(4, ("Memory allocation failure, OOM\n"));
+ _mali_osk_free(descriptor);
+ /* Linux will free the CPU address allocation, userspace client the Mali address allocation */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+}
+
+static void _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args )
+{
+ memory_session * session_data;
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+ mali_memory_allocation * descriptor;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ /* Stall the MMU(s) using the address space we're operating on.
+ * Note that active_mmus must be sorted in order of ID to avoid a mutex
+ * ordering violation.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
+ {
+ const int max_loop_count = 100;
+ const int sleep_duration = 1; /* must be below 1000 */
+ int i;
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+ if (!mali_benchmark)
+ {
+ for ( i = 0; i < max_loop_count; i++)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_STALL_ACTIVE) break;
+ _mali_osk_time_ubusydelay(sleep_duration);
+ }
+
+ MALI_DEBUG_PRINT_IF(3, max_loop_count == i, ("Stall failed, trying zap anyway\n"));
+ }
+ }
+
+ /* This function also removes the memory from the session's memory list */
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+
+ /* any L2 maintenance was done during mali_allocation_engine_release_memory */
+ /* the session is locked, so the active mmu list should be the same */
+ /* zap the TLB and resume operation */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
+ {
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
+
+/* Handler for unmapping memory for MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+ mali_memory_allocation * descriptor;
+ _mali_osk_lock_t *descriptor_lock;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ MALI_DEBUG_ASSERT_POINTER((memory_session*)descriptor->mali_addr_mapping_info);
+
+ descriptor_lock = descriptor->lock; /* should point to the session data lock... */
+
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_wait( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+ /* Non-interruptible lock type, so the wait above always acquires the lock; error checking is done in the OSK function. */
+
+ _mali_ukk_mem_munmap_internal( args );
+ /* descriptor is no longer valid - it may have been freed */
+
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_signal( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Called when the render core wants the MMU to raise an interrupt */
+static void mali_mmu_probe_irq_trigger(mali_kernel_memory_mmu * mmu)
+{
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_trigger\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Called when the IRQ probe wants the MMU to acknowledge an interrupt from the hardware */
+static _mali_osk_errcode_t mali_mmu_probe_irq_acknowledge(mali_kernel_memory_mmu * mmu)
+{
+ u32 int_stat;
+
+ int_stat = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_STATUS);
+
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ }
+ else MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ }
+ else MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+
+ if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+ (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR))
+ {
+ MALI_SUCCESS;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+struct dump_info
+{
+ u32 buffer_left;
+ u32 register_writes_size;
+ u32 page_table_dump_size;
+ u32 *buffer;
+};
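+
+/*
+ * Layout of the serialized dump produced by the helpers below (a sketch
+ * derived from writereg() and dump_page()):
+ * - register_writes: pairs of u32 values (register offset, value written)
+ * - page_table_dump: for each page, one u32 Mali physical address followed
+ *   by the MALI_MMU_PAGE_SIZE bytes of page contents
+ */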
+
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char * comment, struct dump_info * info, int dump_to_serial)
+{
+ if (dump_to_serial) MALI_DEBUG_PRINT(1, ("writereg %08X %08X # %s\n", where, what, comment));
+
+ if (NULL != info)
+ {
+ info->register_writes_size += sizeof(u32)*2; /* two 32-bit words */
+
+ if (NULL != info->buffer)
+ {
+ /* check that we have enough space */
+ if (info->buffer_left < sizeof(u32)*2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = where;
+ info->buffer++;
+
+ *info->buffer = what;
+ info->buffer++;
+
+ info->buffer_left -= sizeof(u32)*2;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_page(mali_io_address page, u32 phys_addr, struct dump_info * info, int dump_to_serial)
+{
+ if (dump_to_serial)
+ {
+ int i;
+ for (i = 0; i < 256; i++)
+ {
+ MALI_DEBUG_PRINT(1, ("%08X: %08X %08X %08X %08X\n", phys_addr + 16*i, _mali_osk_mem_ioread32(page, (i*4 + 0) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 1) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 2) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 3) * sizeof(u32))));
+
+ }
+ }
+
+ if (NULL != info)
+ {
+ /* 4096 for the page and 4 bytes for the address */
+ const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+ const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+ const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+ info->page_table_dump_size += dump_size_in_bytes;
+
+ if (NULL != info->buffer)
+ {
+ if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = phys_addr;
+ info->buffer++;
+
+ _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+ info->buffer += page_size_in_elements;
+
+ info->buffer_left -= dump_size_in_bytes;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_page_table(memory_session * session_data, struct dump_info * info)
+{
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ MALI_DEBUG_ASSERT_POINTER(info);
+
+ if (NULL != session_data->page_directory_mapped)
+ {
+ int i;
+
+ MALI_CHECK_NO_ERROR(
+ dump_page(session_data->page_directory_mapped, session_data->page_directory, info, 0)
+ );
+
+ for (i = 0; i < 1024; i++)
+ {
+ if (NULL != session_data->page_entries_mapped[i])
+ {
+ MALI_CHECK_NO_ERROR(
+ dump_page(session_data->page_entries_mapped[i], _mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info, 0)
+ );
+ }
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_registers(memory_session * session_data, struct dump_info * info)
+{
+ MALI_CHECK_NO_ERROR(writereg(0x00000000, session_data->page_directory, "set the page directory address", info, 0));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info, 0));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info, 0));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data, &info));
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data, &info));
+ args->size = info.register_writes_size + info.page_table_dump_size;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+ info.buffer_left = args->size;
+ info.buffer = args->buffer;
+
+ args->register_writes = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data, &info));
+
+ args->page_table_dump = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data, &info));
+
+ args->register_writes_size = info.register_writes_size;
+ args->page_table_dump_size = info.page_table_dump_size;
+
+ MALI_SUCCESS;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed)
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stubbed)
+ */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h
new file mode 100644
index 00000000000..a199fd66b5a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_mmu.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_MMU_H__
+#define __MALI_KERNEL_MEM_MMU_H__
+
+#include "mali_kernel_session_manager.h"
+
+/**
+ * Look up an MMU core by ID.
+ * @param id ID of the MMU to find
+ * @return NULL if no MMU with that ID was found, non-NULL if a core was found.
+ */
+void* mali_memory_core_mmu_lookup(u32 id);
+
+/**
+ * Activate a user session with its address space on the given MMU.
+ * If the session can't be activated because the MMU is busy and
+ * a callback pointer is given, the callback will be called once the MMU becomes idle.
+ * If the same callback pointer is registered multiple times it will only be called once.
+ * Nested activations are supported.
+ * Each call must be matched by a call to @see mali_memory_core_mmu_release_address_space_reference
+ *
+ * @param mmu_ptr The MMU to activate the address space on
+ * @param mali_session_data The user session object whose address space to activate
+ * @param callback Pointer to the function to call when the MMU becomes idle
+ * @param callback_argument Argument given to the callback
+ * @return 0 if the address space was activated, -EBUSY if the MMU was busy, -EFAULT in all other cases.
+ */
+int mali_memory_core_mmu_activate_page_table(void* mmu_ptr, struct mali_session_data * mali_session_data, void(*callback)(void*), void * callback_argument);
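+
+/*
+ * Illustrative call sequence (a sketch, not part of the driver; my_mmu_idle_cb
+ * and cb_ctx are hypothetical names):
+ *
+ *   if (0 == mali_memory_core_mmu_activate_page_table(mmu, session,
+ *                                                     my_mmu_idle_cb, cb_ctx))
+ *   {
+ *       ...use the session's address space on this MMU...
+ *       mali_memory_core_mmu_release_address_space_reference(mmu);
+ *   }
+ *
+ * On -EBUSY, my_mmu_idle_cb(cb_ctx) is invoked once the MMU becomes idle.
+ */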
+
+/**
+ * Release a reference to the currently active address space.
+ * Once the last reference is released, any registered callback(s) will be called before the function returns
+ *
+ * @note Use caution when calling this function with locks held, since the callback(s) may be invoked
+ * @param mmu The MMU whose active address space to release a reference to
+ */
+void mali_memory_core_mmu_release_address_space_reference(void* mmu);
+
+/**
+ * Soft reset of MMU - needed after power up
+ *
+ * @param mmu_ptr The MMU pointer registered with the relevant core
+ */
+void mali_kernel_mmu_reset(void * mmu_ptr);
+
+void mali_kernel_mmu_force_bus_reset(void * mmu_ptr);
+
+/**
+ * Unregister a previously registered callback.
+ * @param mmu The MMU to unregister the callback on
+ * @param callback The function to unregister
+ */
+void mali_memory_core_mmu_unregister_callback(void* mmu, void(*callback)(void*));
+
+
+
+#endif /* __MALI_KERNEL_MEM_MMU_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c
new file mode 100644
index 00000000000..845de935ec9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+
+typedef struct os_allocation
+{
+ u32 num_pages;
+ u32 offset_start;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+} os_allocation;
+
+typedef struct os_allocator
+{
+ _mali_osk_lock_t *mutex;
+
+ /**
+ * Maximum number of pages to allocate from the OS
+ */
+ u32 num_pages_max;
+
+ /**
+ * Number of pages allocated from the OS
+ */
+ u32 num_pages_allocated;
+
+ /** CPU Usage adjustment (add to mali physical address to get cpu physical address) */
+ u32 cpu_usage_adjust;
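+ /* e.g. with RAM at CPU physical 0x80000000 that Mali sees at 0x0, cpu_usage_adjust would be 0x80000000 (illustrative values) */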
+} os_allocator;
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void os_allocator_release(void * ctx, void * handle);
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block );
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ os_allocator * info;
+
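+ /* round the byte limit up to a whole number of CPU pages */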
+ max_allocation = (max_allocation + _MALI_OSK_CPU_PAGE_SIZE-1) & ~(_MALI_OSK_CPU_PAGE_SIZE-1);
+
+ MALI_DEBUG_PRINT(2, ("Mali OS memory allocator created with max allocation size of 0x%X bytes, cpu_usage_adjust 0x%08X\n", max_allocation, cpu_usage_adjust));
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(os_allocator));
+ if (NULL != info)
+ {
+ info->num_pages_max = max_allocation / _MALI_OSK_CPU_PAGE_SIZE;
+ info->num_pages_allocated = 0;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED, 0, 106);
+ if (NULL != info->mutex)
+ {
+ allocator->allocate = os_allocator_allocate;
+ allocator->allocate_page_table_block = os_allocator_allocate_page_table_block;
+ allocator->destroy = os_allocator_destroy;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ os_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (os_allocator*)allocator->ctx;
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ u32 left;
+ os_allocator * info;
+ os_allocation * allocation;
+ int pages_allocated = 0;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (os_allocator*)ctx;
+ left = descriptor->size - *offset;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ /** @note this code may not work on Linux, or may require a more complex Linux implementation */
+ allocation = _mali_osk_malloc(sizeof(os_allocation));
+ if (NULL != allocation)
+ {
+ u32 os_mem_max_usage = info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE;
+ allocation->offset_start = *offset;
+ allocation->num_pages = ((left + _MALI_OSK_CPU_PAGE_SIZE - 1) & ~(_MALI_OSK_CPU_PAGE_SIZE - 1)) >> _MALI_OSK_CPU_PAGE_ORDER;
+ MALI_DEBUG_PRINT(6, ("Allocating page array of size %d bytes\n", allocation->num_pages * sizeof(struct page*)));
+
+ while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max) && _mali_osk_mem_check_allocated(os_mem_max_usage))
+ {
+ err = mali_allocation_engine_map_physical(engine, descriptor, *offset, MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, info->cpu_usage_adjust, _MALI_OSK_CPU_PAGE_SIZE);
+ if ( _MALI_OSK_ERR_OK != err)
+ {
+ if ( _MALI_OSK_ERR_NOMEM == err)
+ {
+ /* 'Partial' allocation (or, out-of-memory on first page) */
+ break;
+ }
+
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+
+ /* Fatal error, cleanup any previous pages allocated. */
+ if ( pages_allocated > 0 )
+ {
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*pages_allocated, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+ /* (*offset) doesn't need to be restored; it will not be used by the caller on failure */
+ }
+
+ pages_allocated = 0;
+
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ break;
+ }
+
+ /* Loop iteration */
+ if (left < _MALI_OSK_CPU_PAGE_SIZE) left = 0;
+ else left -= _MALI_OSK_CPU_PAGE_SIZE;
+
+ pages_allocated++;
+
+ *offset += _MALI_OSK_CPU_PAGE_SIZE;
+ }
+
+ /* Loop termination; decide on result */
+ if (pages_allocated)
+ {
+ MALI_DEBUG_PRINT(6, ("Allocated %d pages\n", pages_allocated));
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Some OSes do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * The flush below is required for Mali to have a correct view of the memory.
+ */
+ _mali_osk_cache_ensure_uncached_range_flushed( (void *)descriptor, allocation->offset_start, pages_allocated *_MALI_OSK_CPU_PAGE_SIZE );
+ allocation->num_pages = pages_allocated;
+ allocation->engine = engine; /* Necessary to make the engine's unmap call */
+ allocation->descriptor = descriptor; /* Necessary to make the engine's unmap call */
+ info->num_pages_allocated += pages_allocated;
+
+ MALI_DEBUG_PRINT(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+ alloc_info->ctx = info;
+ alloc_info->handle = allocation;
+ alloc_info->release = os_allocator_release;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Releasing pages array due to no pages allocated\n"));
+ _mali_osk_free( allocation );
+ }
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+static void os_allocator_release(void * ctx, void * handle)
+{
+ os_allocator * info;
+ os_allocation * allocation;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(handle);
+
+ info = (os_allocator*)ctx;
+ allocation = (os_allocation*)handle;
+ engine = allocation->engine;
+ descriptor = allocation->descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER( engine );
+ MALI_DEBUG_ASSERT_POINTER( descriptor );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_PRINT(6, ("Releasing %d os pages\n", allocation->num_pages));
+
+ MALI_DEBUG_ASSERT( allocation->num_pages <= info->num_pages_allocated);
+ info->num_pages_allocated -= allocation->num_pages;
+
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*allocation->num_pages, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_free(allocation);
+}
+
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+ const int allocation_order = 6; /* _MALI_OSK_CPU_PAGE_SIZE << 6 */
+ void *virt;
+ const u32 pages_to_allocate = 1 << allocation_order;
+ const u32 size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+ os_allocator * info;
+
+ u32 cpu_phys_base;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ info = (os_allocator*)ctx;
+
+ /* Ensure we don't allocate more than we're supposed to from the ctx */
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ if ( (info->num_pages_allocated + pages_to_allocate > info->num_pages_max) && _mali_osk_mem_check_allocated(info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE) )
+ {
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+
+ virt = _mali_osk_mem_allocioregion( &cpu_phys_base, size );
+
+ if ( NULL == virt )
+ {
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+
+ block->release = os_allocator_page_table_block_release;
+ block->ctx = ctx;
+ block->handle = (void*)allocation_order;
+ block->size = size;
+ block->phys_base = cpu_phys_base - info->cpu_usage_adjust;
+ block->mapping = virt;
+
+ info->num_pages_allocated += pages_to_allocate;
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block )
+{
+ os_allocator * info;
+ u32 allocation_order;
+ u32 pages_allocated;
+
+ MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+ info = (os_allocator*)page_table_block->ctx;
+
+ MALI_DEBUG_ASSERT_POINTER( info );
+
+ allocation_order = (u32)page_table_block->handle;
+
+ pages_allocated = 1 << allocation_order;
+
+ MALI_DEBUG_ASSERT( pages_allocated * _MALI_OSK_CPU_PAGE_SIZE == page_table_block->size );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_ASSERT( pages_allocated <= info->num_pages_allocated);
+ info->num_pages_allocated -= pages_allocated;
+
+ /* Adjust phys_base from mali physical address to CPU physical address */
+ _mali_osk_mem_freeioregion( page_table_block->phys_base + info->cpu_usage_adjust, page_table_block->size, page_table_block->mapping );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h
new file mode 100644
index 00000000000..ff49f8a816a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_mem_os.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_OS_H__
+#define __MALI_KERNEL_MEM_OS_H__
+
+/**
+ * @brief Creates an object that manages allocating OS memory
+ *
+ * Creates an object that provides an interface to allocate OS memory and
+ * have it mapped into the Mali virtual memory space.
+ *
+ * The object exposes pointers to
+ * - allocate OS memory
+ * - allocate Mali page tables in OS memory
+ * - destroy the object
+ *
+ * Allocations from OS memory are of type mali_physical_memory_allocation
+ * which provides a function to release the allocation.
+ *
+ * @param max_allocation max. number of bytes that can be allocated from OS memory
+ * @param cpu_usage_adjust value to add to mali physical addresses to obtain CPU physical addresses
+ * @param name description of the allocator
+ * @return pointer to mali_physical_memory_allocator object. NULL on failure.
+ **/
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name);
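+
+/*
+ * Example (a sketch; the 64 MiB limit, zero adjust value and name are illustrative):
+ *
+ *   mali_physical_memory_allocator *os_alloc =
+ *       mali_os_allocator_create(64 * 1024 * 1024, 0, "OS memory");
+ *   if (NULL == os_alloc) return _MALI_OSK_ERR_NOMEM;
+ */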
+
+#endif /* __MALI_KERNEL_MEM_OS_H__ */
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c
new file mode 100644
index 00000000000..3b4ace05f01
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+typedef struct memory_engine
+{
+ mali_kernel_mem_address_manager * mali_address;
+ mali_kernel_mem_address_manager * process_address;
+} memory_engine;
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager)
+{
+ memory_engine * engine;
+
+ /* Mali Address Manager need not support unmap_physical */
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->map_physical);
+
+ /* Process Address Manager must support unmap_physical for OS allocation
+ * error path handling */
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->map_physical);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->unmap_physical);
+
+
+ engine = (memory_engine*)_mali_osk_malloc(sizeof(memory_engine));
+ if (NULL == engine) return NULL;
+
+ engine->mali_address = mali_address_manager;
+ engine->process_address = process_address_manager;
+
+ return (mali_allocation_engine)engine;
+}
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine)
+{
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ _mali_osk_free(engine);
+}
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_allocators, _mali_osk_list_t *tracking_list )
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(physical_allocators);
+ /* ASSERT that the list member has been initialized, even if it won't be
+ * used for tracking. We need it to be initialized to see if we need to
+ * delete it from a list in the release function. */
+ MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );
+
+ if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
+ {
+ _mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ res = engine->process_address->allocate(descriptor);
+ }
+ if ( _MALI_OSK_ERR_OK == res )
+ {
+ /* address space setup OK, commit physical memory to the allocation */
+ mali_physical_memory_allocator * active_allocator = physical_allocators;
+ struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
+ u32 offset = 0;
+
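+ /* walk the allocator chain: FINISHED commits the request and ends the walk,
+ * NONE and PARTIAL fall through to the next allocator, INTERNAL_FAILURE aborts */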
+ while ( NULL != active_allocator )
+ {
+ switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
+ {
+ case MALI_MEM_ALLOC_FINISHED:
+ if ( NULL != tracking_list )
+ {
+ /* Insert into the memory session list */
+ /* ASSERT that it is not already part of a list */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
+ _mali_osk_list_add( &descriptor->list, tracking_list );
+ }
+
+ MALI_SUCCESS; /* all done */
+ case MALI_MEM_ALLOC_NONE:
+ /* reuse current active_allocation_tracker */
+ MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ case MALI_MEM_ALLOC_PARTIAL:
+ if (NULL != active_allocator->next)
+ {
+ /* need a new allocation tracker */
+ active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
+ if (NULL != active_allocation_tracker->next)
+ {
+ active_allocation_tracker = active_allocation_tracker->next;
+ MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ }
+ }
+ /* FALL THROUGH */
+ case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+ active_allocator = NULL; /* end the while loop */
+ break;
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("Non-fatal OOM, have to cleanup, stopped at offset %d for size %d\n", offset, descriptor->size));
+
+ /* allocation failure, start cleanup */
+ /* loop over any potential partial allocations */
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ /* handle blank trackers which will show up during failure */
+ if (NULL != active_allocation_tracker->release)
+ {
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ }
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ /* release the address spaces */
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+ }
+ engine->mali_address->release(descriptor);
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+void mali_allocation_engine_release_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+ mali_physical_memory_allocation * active_allocation_tracker;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* Determine whether we need to remove this from a tracking list */
+ if ( ! _mali_osk_list_empty( &descriptor->list ) )
+ {
+ _mali_osk_list_del( &descriptor->list );
+ /* Clear the list for debug mode, catch use-after-free */
+ MALI_DEBUG_CODE( descriptor->list.next = descriptor->list.prev = NULL; )
+ }
+
+ engine->mali_address->release(descriptor);
+
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ MALI_DEBUG_ASSERT_POINTER(active_allocation_tracker->release);
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+}
+
+
+_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size)
+{
+ _mali_osk_errcode_t err;
+ memory_engine * engine = (memory_engine*)mem_engine;
+ _mali_osk_mem_mapregion_flags_t unmap_flags = (_mali_osk_mem_mapregion_flags_t)0;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X\n", phys, size, offset));
+
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address->map_physical);
+
+ /* Handle the process address manager first, because we may need it to
+ * allocate the physical page */
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ /* Handle OS-allocated specially, since an adjustment may be required */
+ if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == phys )
+ {
+ MALI_DEBUG_ASSERT( _MALI_OSK_CPU_PAGE_SIZE == size );
+
+ /* Set flags to use on error path */
+ unmap_flags |= _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR;
+
+ err = engine->process_address->map_physical(descriptor, offset, &phys, size);
+ /* Adjust for cpu physical address to mali physical address */
+ phys -= cpu_usage_adjust;
+ }
+ else
+ {
+ u32 cpu_phys;
+ /* Adjust mali physical address to cpu physical address */
+ cpu_phys = phys + cpu_usage_adjust;
+ err = engine->process_address->map_physical(descriptor, offset, &cpu_phys, size);
+ }
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_ERROR( err );
+ }
+ }
+
+ MALI_DEBUG_PRINT(4, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X to CPUVA 0x%08X\n", phys, size, offset, (u32)(descriptor->mapping) + offset));
+
+ /* Mali address manager must use the physical address - no point in asking
+ * it to allocate another one for us */
+ MALI_DEBUG_ASSERT( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC != phys );
+
+ err = engine->mali_address->map_physical(descriptor, offset, &phys, size);
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ MALI_DEBUG_PRINT( 2, ("Process address manager succeeded, but Mali Address manager failed for phys=0x%08X size=0x%08X, offset=0x%08X. Will unmap.\n", phys, size, offset));
+ engine->process_address->unmap_physical(descriptor, offset, size, unmap_flags);
+ }
+
+ MALI_ERROR( err );
+ }
+
+ MALI_SUCCESS;
+}
+
+void mali_allocation_engine_unmap_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags )
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_PRINT(7, ("UnMapping length 0x%08X at offset 0x%08X\n", size, offset));
+
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+ MALI_DEBUG_ASSERT_POINTER(engine->process_address);
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ /* Mandatory for the process_address manager to have an unmap function */
+ engine->process_address->unmap_physical( descriptor, offset, size, unmap_flags );
+ }
+
+ /* Optional for the mali_address manager to have an unmap function */
+ if ( NULL != engine->mali_address->unmap_physical )
+ {
+ engine->mali_address->unmap_physical( descriptor, offset, size, unmap_flags );
+ }
+}
+
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider)
+{
+ mali_physical_memory_allocator * active_allocator = physical_provider;
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+ while ( NULL != active_allocator )
+ {
+ switch (active_allocator->allocate_page_table_block(active_allocator->ctx, descriptor))
+ {
+ case MALI_MEM_ALLOC_FINISHED:
+ MALI_SUCCESS; /* all done */
+ case MALI_MEM_ALLOC_NONE:
+ /* try next */
+ MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate PageTables: No allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ case MALI_MEM_ALLOC_PARTIAL:
+ MALI_DEBUG_PRINT(1, ("Invalid return value from allocate_page_table_block call: MALI_MEM_ALLOC_PARTIAL\n"));
+ /* FALL THROUGH */
+ case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+ MALI_DEBUG_PRINT(1, ("Aborting due to allocation failure\n"));
+ active_allocator = NULL; /* end the while loop */
+ break;
+ }
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+
+void mali_allocation_engine_report_allocators( mali_physical_memory_allocator * physical_provider )
+{
+ mali_physical_memory_allocator * active_allocator = physical_provider;
+ MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+ MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest numbered first) :\n"));
+ while ( NULL != active_allocator )
+ {
+ if ( NULL != active_allocator->name )
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", active_allocator->alloc_order, active_allocator->name) );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", active_allocator->alloc_order) );
+ }
+ active_allocator = active_allocator->next;
+ }
+
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h
new file mode 100644
index 00000000000..80a2b4bb3a7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_memory_engine.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEMORY_ENGINE_H__
+#define __MALI_KERNEL_MEMORY_ENGINE_H__
+
+typedef void * mali_allocation_engine;
+
+typedef enum { MALI_MEM_ALLOC_FINISHED, MALI_MEM_ALLOC_PARTIAL, MALI_MEM_ALLOC_NONE, MALI_MEM_ALLOC_INTERNAL_FAILURE } mali_physical_memory_allocation_result;
+
+typedef struct mali_physical_memory_allocation
+{
+ void (*release)(void * ctx, void * handle); /**< Function to call on to release the physical memory */
+ void * ctx;
+ void * handle;
+ struct mali_physical_memory_allocation * next;
+} mali_physical_memory_allocation;
+
+struct mali_page_table_block;
+
+typedef struct mali_page_table_block
+{
+ void (*release)(struct mali_page_table_block *page_table_block);
+ void * ctx;
+ void * handle;
+ u32 size; /**< In bytes, should be a multiple of MALI_MMU_PAGE_SIZE to avoid internal fragmentation */
+ u32 phys_base; /**< Mali physical address */
+ mali_io_address mapping;
+} mali_page_table_block;
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+typedef enum
+{
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE = 0x1,
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE = 0x2,
+} mali_memory_allocation_flag;
+
+/**
+ * Supplying this 'magic' physical address requests that the OS allocate the
+ * physical address at page commit time, rather than committing a specific page
+ */
+#define MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC ((u32)(-1))
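+
+/*
+ * Usage sketch (mirrors the OS allocator; engine, descriptor, offset and
+ * cpu_usage_adjust are assumed to be set up by the caller) committing one
+ * OS-allocated page:
+ *
+ *   err = mali_allocation_engine_map_physical(engine, descriptor, offset,
+ *             MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC,
+ *             cpu_usage_adjust, _MALI_OSK_CPU_PAGE_SIZE);
+ */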
+
+typedef struct mali_memory_allocation
+{
+ /* Information about the allocation */
+ void * mapping; /**< CPU virtual address where the memory is mapped */
+ u32 mali_address; /**< The address of the allocation as seen by Mali */
+ u32 size; /**< Size of the allocation */
+ u32 permission; /**< Permission settings */
+ mali_memory_allocation_flag flags;
+
+ _mali_osk_lock_t * lock;
+
+ /* Manager specific information pointers */
+ void * mali_addr_mapping_info; /**< Mali address allocation specific info */
+ void * process_addr_mapping_info; /**< Mapping manager specific info */
+
+ mali_physical_memory_allocation physical_allocation;
+
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+} mali_memory_allocation;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+typedef struct mali_physical_memory_allocator
+{
+ mali_physical_memory_allocation_result (*allocate)(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+ mali_physical_memory_allocation_result (*allocate_page_table_block)(void * ctx, mali_page_table_block * block); /* MALI_MEM_ALLOC_PARTIAL not allowed */
+ void (*destroy)(struct mali_physical_memory_allocator * allocator);
+ void * ctx;
+ const char * name; /**< Descriptive name for use in mali_allocation_engine_report_allocators, or NULL */
+ u32 alloc_order; /**< Order in which the allocations should happen */
+ struct mali_physical_memory_allocator * next;
+} mali_physical_memory_allocator;
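+
+/*
+ * Chaining sketch (illustrative; "dedicated" and "os_alloc" are assumed to
+ * have been created elsewhere). Allocators are tried in list order, so a
+ * dedicated-memory allocator can be preferred over OS memory:
+ *
+ *   dedicated->alloc_order = 1; dedicated->next = os_alloc;
+ *   os_alloc->alloc_order = 2; os_alloc->next = NULL;
+ *   mali_allocation_engine_allocate_memory(engine, descriptor,
+ *                                          dedicated, tracking_list);
+ */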
+
+typedef struct mali_kernel_mem_address_manager
+{
+ _mali_osk_errcode_t (*allocate)(mali_memory_allocation *); /**< Function to call to reserve an address */
+ void (*release)(mali_memory_allocation *); /**< Function to call to free the address allocated */
+
+ /**
+ * Function called for each physical sub allocation.
+ * Called for each physical block allocated by the physical memory manager.
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] offset Offset from the start of the range
+ * @param[in,out] phys_addr A pointer to the physical address of the start of the
+ * physical block. When *phys_addr == MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC
+ * is used, this requests that the function allocate the physical page
+ * itself and return it through the pointer provided.
+ * @param[in] size Length in bytes of the physical block
+ * @return _MALI_OSK_ERR_OK on success.
+ * A value of type _mali_osk_errcode_t other than _MALI_OSK_ERR_OK indicates failure.
+ * Specifically, _MALI_OSK_ERR_UNSUPPORTED indicates that the function
+ * does not support allocating physical pages itself.
+ */
+ _mali_osk_errcode_t (*map_physical)(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+
+ /**
+ * Function called to remove a physical sub allocation.
+ * Called on error paths where one of the address managers fails.
+ *
+ * @note this is optional. For address managers where this is not
+ * implemented, the value of this member is NULL. The memory engine
+ * currently does not require the mali address manager to be able to
+ * unmap individual pages, but the process address manager must have this
+ * capability.
+ *
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] offset Offset from the start of the range
+ * @param[in] size Length in bytes of the physical block
+ * @param[in] flags flags to use on a per-page basis. For OS-allocated
+ * physical pages, this must include _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR.
+ */
+ void (*unmap_physical)(mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags);
+
+} mali_kernel_mem_address_manager;
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager);
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine);
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_provider, _mali_osk_list_t *tracking_list );
+void mali_allocation_engine_release_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size);
+void mali_allocation_engine_unmap_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags);
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider);
+
+void mali_allocation_engine_report_allocators(mali_physical_memory_allocator * physical_provider);
+
+#endif /* __MALI_KERNEL_MEMORY_ENGINE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h
new file mode 100644
index 00000000000..ff1e153678c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_pp.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PP_H__
+#define __MALI_KERNEL_PP_H__
+
+extern struct mali_kernel_subsystem mali_subsystem_mali200;
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t malipp_signal_power_up( u32 core_num, mali_bool queue_only );
+_mali_osk_errcode_t malipp_signal_power_down( u32 core_num, mali_bool immediate_only );
+#endif
+
+#endif /* __MALI_KERNEL_PP_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c
new file mode 100644
index 00000000000..07bb894b1b4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+typedef struct mali_profiling_entry
+{
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+} mali_profiling_entry;
+
+
+typedef enum mali_profiling_state
+{
+ MALI_PROFILING_STATE_UNINITIALIZED,
+ MALI_PROFILING_STATE_IDLE,
+ MALI_PROFILING_STATE_RUNNING,
+ MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
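+
+/*
+ * State flow (sketch): _mali_profiling_init() leaves the module IDLE,
+ * _mali_ukk_profiling_start() moves IDLE -> RUNNING, _mali_ukk_profiling_stop()
+ * moves RUNNING -> RETURN so recorded entries can be read back with
+ * _mali_ukk_profiling_get_event(), and _mali_profiling_term() returns to
+ * UNINITIALIZED.
+ */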
+
+
+static _mali_osk_lock_t *lock = NULL;
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+static mali_profiling_entry* profile_entries = NULL;
+static u32 profile_entry_count = 0;
+static _mali_osk_atomic_t profile_insert_index;
+static _mali_osk_atomic_t profile_entries_written;
+
+
+_mali_osk_errcode_t _mali_profiling_init(void)
+{
+ profile_entries = NULL;
+ profile_entry_count = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+ _mali_osk_atomic_init(&profile_entries_written, 0);
+
+ lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0 );
+ if (NULL == lock)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_profiling_term(void)
+{
+ prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+ /* wait for all elements to be completely inserted into the array */
+ while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written))
+ {
+ /* do nothing */;
+ }
+
+ if (NULL != profile_entries)
+ {
+ _mali_osk_free(profile_entries);
+ profile_entries = NULL;
+ }
+
+ if (NULL != lock)
+ {
+ _mali_osk_lock_term(lock);
+ lock = NULL;
+ }
+}
+
+inline _mali_osk_errcode_t _mali_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+ u32 cur_index = _mali_osk_atomic_inc_return(&profile_insert_index) - 1;
+
+ if (prof_state != MALI_PROFILING_STATE_RUNNING || cur_index >= profile_entry_count)
+ {
+ /*
+ * Not in recording mode, or buffer is full
+ * Decrement index again, and early out
+ */
+ _mali_osk_atomic_dec(&profile_insert_index);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ profile_entries[cur_index].timestamp = _mali_timestamp_get();
+ profile_entries[cur_index].event_id = event_id;
+ profile_entries[cur_index].data[0] = data0;
+ profile_entries[cur_index].data[1] = data1;
+ profile_entries[cur_index].data[2] = data2;
+ profile_entries[cur_index].data[3] = data3;
+ profile_entries[cur_index].data[4] = data4;
+
+ _mali_osk_atomic_inc(&profile_entries_written);
+
+ return _MALI_OSK_ERR_OK;
+}
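+
+/*
+ * Illustrative sketch, not compiled into the driver: the two atomics above
+ * form a reserve/fill/publish protocol. A producer reserves a slot with an
+ * atomic increment, fills it while it is the sole owner, then publishes it
+ * with a second increment; the busy-waits in _mali_profiling_term() and
+ * _mali_ukk_profiling_stop() rely on "insert_index == entries_written"
+ * meaning that no entry is still half-written.
+ */
+#if 0 /* example only */
+static _mali_osk_errcode_t example_reserve_fill_publish(u32 event_id)
+{
+ /* 1. reserve: claim a unique slot index */
+ u32 slot = _mali_osk_atomic_inc_return(&profile_insert_index) - 1;
+ if (prof_state != MALI_PROFILING_STATE_RUNNING || slot >= profile_entry_count)
+ {
+ _mali_osk_atomic_dec(&profile_insert_index); /* give the slot back */
+ return _MALI_OSK_ERR_FAULT;
+ }
+ /* 2. fill: only this producer owns profile_entries[slot] */
+ profile_entries[slot].timestamp = _mali_timestamp_get();
+ profile_entries[slot].event_id = event_id;
+ /* 3. publish: mark the slot as completely written */
+ _mali_osk_atomic_inc(&profile_entries_written);
+ return _MALI_OSK_ERR_OK;
+}
+#endif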
+
+
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
+{
+ _mali_osk_errcode_t ret;
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_IDLE)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (args->limit > MALI_PROFILING_MAX_BUFFER_ENTRIES)
+ {
+ args->limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+ }
+
+ profile_entries = _mali_osk_malloc(args->limit * sizeof(mali_profiling_entry));
+ if (NULL == profile_entries)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ /* Only record the buffer size once the allocation has succeeded */
+ profile_entry_count = args->limit;
+
+ ret = _mali_timestamp_reset();
+
+ if (ret == _MALI_OSK_ERR_OK)
+ {
+ prof_state = MALI_PROFILING_STATE_RUNNING;
+ }
+ else
+ {
+ _mali_osk_free(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+ /* Always add process and thread identifiers in the first two data elements for events from user space */
+ return _mali_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RUNNING)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ /* go into return state (user to retrieve events), no more events will be added after this */
+ prof_state = MALI_PROFILING_STATE_RETURN;
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* wait for all elements to be completely inserted into array */
+ while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written))
+ {
+ /* do nothing */;
+ }
+
+ args->count = _mali_osk_atomic_read(&profile_insert_index);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
+{
+ u32 index = args->index;
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (index >= _mali_osk_atomic_read(&profile_entries_written))
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ args->timestamp = profile_entries[index].timestamp;
+ args->event_id = profile_entries[index].event_id;
+ args->data[0] = profile_entries[index].data[0];
+ args->data[1] = profile_entries[index].data[1];
+ args->data[2] = profile_entries[index].data[2];
+ args->data[3] = profile_entries[index].data[3];
+ args->data[4] = profile_entries[index].data[4];
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+ profile_entry_count = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+ _mali_osk_atomic_init(&profile_entries_written, 0);
+ if (NULL != profile_entries)
+ {
+ _mali_osk_free(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
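+/*
+ * Usage sketch, not compiled into the driver: the ioctls above drive
+ * prof_state through IDLE -> RUNNING -> RETURN -> IDLE. Only the fields
+ * actually referenced in this file are set; any ctx/session members the
+ * uk structures may carry are omitted here.
+ */
+#if 0 /* example only */
+static void example_profiling_session(void)
+{
+ _mali_uk_profiling_start_s start;
+ _mali_uk_profiling_stop_s stop;
+ _mali_uk_profiling_get_event_s ev;
+ _mali_uk_profiling_clear_s clear;
+ u32 i;
+
+ start.limit = 4096; /* buffer entries; clamped to MALI_PROFILING_MAX_BUFFER_ENTRIES */
+ _mali_ukk_profiling_start(&start); /* IDLE -> RUNNING */
+
+ /* ... workload runs; _mali_profiling_add_event() records entries ... */
+
+ _mali_ukk_profiling_stop(&stop); /* RUNNING -> RETURN */
+ for (i = 0; i < stop.count; i++)
+ {
+ ev.index = i;
+ if (_MALI_OSK_ERR_OK == _mali_ukk_profiling_get_event(&ev))
+ {
+ /* consume ev.timestamp, ev.event_id, ev.data[0..4] */
+ }
+ }
+ _mali_ukk_profiling_clear(&clear); /* RETURN -> IDLE, frees the buffer */
+}
+#endif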
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h
new file mode 100644
index 00000000000..5d3aa6eb841
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_profiling.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PROFILING_H__
+#define __MALI_KERNEL_PROFILING_H__
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+
+#include <../../../include/cinstr/mali_cinstr_profiling_events_m200.h>
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_init(void);
+
+/**
+ * Terminate the profiling module.
+ */
+void _mali_profiling_term(void);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 - First data parameter, depending on event_id specified.
+ * @param data1 - Second data parameter, depending on event_id specified.
+ * @param data2 - Third data parameter, depending on event_id specified.
+ * @param data3 - Fourth data parameter, depending on event_id specified.
+ * @param data4 - Fifth data parameter, depending on event_id specified.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
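+
+/*
+ * Call-site sketch (hypothetical; the exact event id encoding comes from the
+ * cinstr header included above and is only assumed here):
+ *
+ *   _mali_profiling_add_event(event_id,
+ *                             _mali_osk_get_pid(), _mali_osk_get_tid(),
+ *                             0, 0, 0);
+ *
+ * Kernel-side callers pick the data words freely; the user-space path
+ * (_mali_ukk_profiling_add_event) always stores pid/tid in data0/data1.
+ */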
+
+#endif /* MALI_TIMELINE_PROFILING_ENABLED */
+
+#endif /* __MALI_KERNEL_PROFILING_H__ */
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c
new file mode 100644
index 00000000000..1db8f3ad5ab
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.c
@@ -0,0 +1,2032 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_osk_list.h"
+#if MALI_GPU_UTILIZATION
+#include "mali_kernel_utilization.h"
+#endif
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h"
+#endif /* USING_MMU */
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif /* USING_MALI400_L2_CACHE */
+
+#define HANG_CHECK_MSECS_MIN 100
+#define HANG_CHECK_MSECS_MAX 2000 /* 2 secs */
+#define HANG_CHECK_MSECS_DEFAULT 500 /* 500 ms */
+
+#define WATCHDOG_MSECS_MIN (2*HANG_CHECK_MSECS_MIN)
+#define WATCHDOG_MSECS_MAX 3600000 /* 1 hour */
+#define WATCHDOG_MSECS_DEFAULT 900000 /* 15 mins */
+
+/* max value that will be converted from jiffies to msecs and written to job->render_time_msecs */
+#define JOB_MAX_JIFFIES 100000
+
+int mali_hang_check_interval = HANG_CHECK_MSECS_DEFAULT;
+int mali_max_job_runtime = WATCHDOG_MSECS_DEFAULT;
+
+/* Subsystem entrypoints: */
+static _mali_osk_errcode_t rendercore_subsystem_startup(mali_kernel_subsystem_identifier id);
+static void rendercore_subsystem_terminate(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static void rendercore_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+
+
+static void mali_core_subsystem_cleanup_all_renderunits(struct mali_core_subsystem* subsys);
+static void mali_core_subsystem_move_core_set_idle(struct mali_core_renderunit *core);
+
+static mali_core_session * mali_core_subsystem_get_waiting_session(mali_core_subsystem *subsystem);
+static mali_core_job * mali_core_subsystem_release_session_get_job(mali_core_subsystem *subsystem, mali_core_session * session);
+
+static void find_and_abort(mali_core_session* session, u32 abort_id);
+
+static void mali_core_job_start_on_core(mali_core_job *job, mali_core_renderunit *core);
+#if USING_MMU
+static void mali_core_subsystem_callback_schedule_wrapper(void* sub);
+#endif
+static void mali_core_subsystem_schedule(mali_core_subsystem*subsystem);
+static void mali_core_renderunit_detach_job_from_core(mali_core_renderunit* core, mali_subsystem_reschedule_option reschedule, mali_subsystem_job_end_code end_status);
+
+static void mali_core_renderunit_irq_handler_remove(struct mali_core_renderunit *core);
+
+static _mali_osk_errcode_t mali_core_irq_handler_upper_half (void * data);
+static void mali_core_irq_handler_bottom_half ( void *data );
+
+#if USING_MMU
+static void lock_subsystem(struct mali_core_subsystem * subsys);
+static void unlock_subsystem(struct mali_core_subsystem * subsys);
+#endif
+
+
+/**
+ * This will be one of the subsystems in the array of subsystems:
+ * static struct mali_kernel_subsystem * subsystems[];
+ * found in file: mali_kernel_core.c
+ *
+ * This subsystem is necessary for operations common to all rendercore
+ * subsystems. For example, mali_subsystem_mali200 and mali_subsystem_gp2 may
+ * share a mutex when RENDERCORES_USE_GLOBAL_MUTEX is non-zero.
+ */
+struct mali_kernel_subsystem mali_subsystem_rendercore=
+{
+ rendercore_subsystem_startup, /* startup */
+ rendercore_subsystem_terminate, /* shutdown */
+ NULL, /* load_complete */
+ NULL, /* system_info_fill */
+ NULL, /* session_begin */
+ NULL, /* session_end */
+#if USING_MMU
+ rendercore_subsystem_broadcast_notification, /* broadcast_notification */
+#else
+ NULL,
+#endif
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+} ;
+
+static _mali_osk_lock_t *rendercores_global_mutex = NULL;
+static u32 rendercores_global_mutex_is_held = 0;
+static u32 rendercores_global_mutex_owner = 0;
+
+/** The 'dummy' rendercore subsystem to allow global subsystem mutex to be
+ * locked for all subsystems that extend the ''rendercore'' */
+static mali_core_subsystem rendercore_dummy_subsystem = {0,};
+
+/*
+ * Rendercore Subsystem functions.
+ *
+ * These are exposed by mali_subsystem_rendercore
+ */
+
+/**
+ * @brief Initialize the Rendercore subsystem.
+ *
+ * This must be called before any other subsystem that extends the
+ * ''rendercore'' may be initialized. For example, this must be called before
+ * the following functions:
+ * - mali200_subsystem_startup(), from mali_subsystem_mali200
+ * - maligp_subsystem_startup(), from mali_subsystem_gp2
+ *
+ * @note This function is separate from mali_core_subsystem_init(). They
+ * are related, in that mali_core_subsystem_init() may use the structures
+ * initialized by rendercore_subsystem_startup()
+ */
+static _mali_osk_errcode_t rendercore_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+ rendercores_global_mutex_is_held = 0;
+ rendercores_global_mutex = _mali_osk_lock_init(
+ (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED),
+ 0, 129);
+
+ if (NULL == rendercores_global_mutex)
+ {
+ MALI_PRINT_ERROR(("Failed: _mali_osk_lock_init\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ rendercore_dummy_subsystem.name = "Rendercore Global Subsystem"; /* On the constant pool, do not free */
+ rendercore_dummy_subsystem.magic_nr = SUBSYSTEM_MAGIC_NR; /* To please the Subsystem Mutex code */
+
+#if MALI_GPU_UTILIZATION
+ if (mali_utilization_init() != _MALI_OSK_ERR_OK)
+ {
+ _mali_osk_lock_term(rendercores_global_mutex);
+ rendercores_global_mutex = NULL;
+ MALI_PRINT_ERROR(("Failed: mali_utilization_init\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ if (_mali_profiling_init() != _MALI_OSK_ERR_OK)
+ {
+ /* No biggie if we weren't able to initialize the profiling */
+ MALI_PRINT_ERROR(("Rendercore: Failed to initialize profiling, feature will be unavailable\n")) ;
+ }
+#endif
+
+ MALI_DEBUG_PRINT(2, ("Rendercore: subsystem global mutex initialized\n")) ;
+ MALI_SUCCESS;
+}
+
+/**
+ * @brief Terminate the Rendercore subsystem.
+ *
+ * This must only be called \b after any other subsystem that extends the
+ * ''rendercore'' has been terminated. For example, this must be called \b after
+ * the following functions:
+ * - mali200_subsystem_terminate(), from mali_subsystem_mali200
+ * - maligp_subsystem_terminate(), from mali_subsystem_gp2
+ *
+ * @note This function is separate from mali_core_subsystem_cleanup(), though,
+ * the subsystems that extend ''rendercore'' must still call
+ * mali_core_subsystem_cleanup() when they terminate.
+ */
+static void rendercore_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+ /* Catch double-terminate */
+ MALI_DEBUG_ASSERT_POINTER( rendercores_global_mutex );
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_term();
+#endif
+
+#if MALI_GPU_UTILIZATION
+ mali_utilization_term();
+#endif
+
+ rendercore_dummy_subsystem.name = NULL; /* The original string was on the constant pool, do not free */
+ rendercore_dummy_subsystem.magic_nr = 0;
+
+ /* ASSERT that no-one's holding this */
+ MALI_DEBUG_PRINT_ASSERT( 0 == rendercores_global_mutex_is_held,
+ ("Rendercores' Global Mutex was held at termination time. Have the subsystems that extend ''rendercore'' been terminated?\n") );
+
+ _mali_osk_lock_term( rendercores_global_mutex );
+ rendercores_global_mutex = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Rendercore: subsystem global mutex terminated\n")) ;
+}
+
+
+#if USING_MMU
+/**
+ * @brief Handle certain Rendercore subsystem broadcast notifications
+ *
+ * When RENDERCORES_USE_GLOBAL_MUTEX is non-zero, this handles the following messages:
+ * - MMU_KILL_STEP0_LOCK_SUBSYSTEM
+ * - MMU_KILL_STEP4_UNLOCK_SUBSYSTEM
+ *
+ * The purpose is to manage the Rendercore Global Mutex, which cannot be
+ * managed by any subsystem that extends the ''rendercore''.
+ *
+ * All other messages must be handled by mali_core_subsystem_broadcast_notification()
+ *
+ *
+ * When RENDERCORES_USE_GLOBAL_MUTEX is 0, this function does nothing.
+ * Instead, the subsystem that extends the ''rendercore'' \b must handle its
+ * own mutexes - refer to mali_core_subsystem_broadcast_notification().
+ *
+ * Used currently only for signalling when MMU has a pagefault
+ */
+static void rendercore_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+ switch(message)
+ {
+ case MMU_KILL_STEP0_LOCK_SUBSYSTEM:
+ lock_subsystem( &rendercore_dummy_subsystem );
+ break;
+ case MMU_KILL_STEP4_UNLOCK_SUBSYSTEM:
+ unlock_subsystem( &rendercore_dummy_subsystem );
+ break;
+
+ case MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES:
+ /** FALLTHROUGH */
+ case MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS:
+ /** FALLTHROUGH */
+ case MMU_KILL_STEP3_CONTINUE_JOB_HANDLING:
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Illegal message: 0x%x, data: 0x%x\n", (u32)message, data));
+ break;
+ }
+
+}
+#endif
+
+/*
+ * Functions inherited by the subsystems that extend the ''rendercore''.
+ */
+
+u32 mali_core_renderunit_register_read(mali_core_renderunit *core, u32 relative_address)
+{
+ u32 read_val;
+
+ #if USING_MALI_PMM
+ if( core->state == CORE_OFF )
+ {
+ MALI_PRINT_ERROR(("Core is OFF during read: Core:%s Addr:0x%04X\n",
+ core->description,relative_address));
+ return 0xDEADBEEF;
+ }
+ #endif
+
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+
+ if (mali_benchmark) return 0;
+
+ if (relative_address >= core->size)
+ {
+ MALI_PRINT_ERROR(("Trying to read from illegal register: 0x%04x in core: %s\n",
+ relative_address, core->description));
+ return 0xDEADBEEF;
+ }
+
+ read_val = _mali_osk_mem_ioread32(core->registers_mapped, relative_address);
+
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_read: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, read_val));
+
+ return read_val;
+}
+
+void mali_core_renderunit_register_read_array(mali_core_renderunit *core,
+ u32 relative_address,
+ u32 * result_array,
+ u32 nr_of_regs
+ )
+{
+ /* NOTE Do not use burst reads against the registers */
+
+ u32 i;
+
+ for(i=0; i<nr_of_regs; ++i)
+ {
+ result_array[i] = mali_core_renderunit_register_read(core, relative_address + i*4);
+ }
+
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_read_array: Core:%s Addr:0x%04X Nr_regs: %u\n",
+ core->description,relative_address, nr_of_regs));
+}
+
+void mali_core_renderunit_register_write(mali_core_renderunit *core, u32 relative_address, u32 new_val)
+{
+ #if USING_MALI_PMM
+ if( core->state == CORE_OFF )
+ {
+ MALI_PRINT_ERROR(("Core is OFF during write: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, new_val));
+ return;
+ }
+ #endif
+
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+
+ if (mali_benchmark) return;
+
+ if (relative_address >= core->size)
+ {
+ MALI_PRINT_ERROR(("Trying to write to illegal register: 0x%04x in core: %s",
+ relative_address, core->description));
+ return;
+ }
+
+ MALI_DEBUG_PRINT(6, ("mali_core_renderunit_register_write: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, new_val));
+
+ _mali_osk_mem_iowrite32(core->registers_mapped, relative_address, new_val);
+}
+
+
+void mali_core_renderunit_register_write_array(mali_core_renderunit *core,
+ u32 relative_address,
+ u32 * source_array,
+ u32 nr_of_regs)
+{
+
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_write_array: Core:%s Addr:0x%04X Nr_regs: %u\n",
+ core->description,relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+
+ for( i = 0; i< nr_of_regs; i++)
+ {
+ mali_core_renderunit_register_write(core, relative_address + i*4, source_array[i]);
+ }
+}
+
+void mali_core_renderunit_timeout_function_hang_detection(void *arg)
+{
+ mali_bool action = MALI_FALSE;
+ mali_core_renderunit * core;
+
+ core = (mali_core_renderunit *) arg;
+ if( !core ) return;
+
+ /* if the core is neither idle nor already marked as watchdog-timed-out */
+ if ( !((CORE_WATCHDOG_TIMEOUT == core->state ) || (CORE_IDLE== core->state)) )
+ {
+ core->state = CORE_HANG_CHECK_TIMEOUT;
+ action = MALI_TRUE;
+ }
+
+ if(action) _mali_osk_irq_schedulework(core->irq);
+}
+
+
+void mali_core_renderunit_timeout_function(void *arg)
+{
+ mali_core_renderunit * core;
+ mali_bool is_watchdog;
+
+ core = (mali_core_renderunit *)arg;
+ if( !core ) return;
+
+ is_watchdog = MALI_TRUE;
+ if (mali_benchmark)
+ {
+ /* poll based core */
+ mali_core_job *job;
+ job = core->current_job;
+ if ( (NULL != job) &&
+ (0 != _mali_osk_time_after(job->watchdog_jiffies,_mali_osk_time_tickcount()))
+ )
+ {
+ core->state = CORE_POLL;
+ is_watchdog = MALI_FALSE;
+ }
+ }
+
+ if (is_watchdog)
+ {
+ MALI_DEBUG_PRINT(3, ("SW-Watchdog timeout: Core:%s\n", core->description));
+ core->state = CORE_WATCHDOG_TIMEOUT;
+ }
+
+ _mali_osk_irq_schedulework(core->irq);
+}
+
+/* Used by external renderunit_create<> function */
+_mali_osk_errcode_t mali_core_renderunit_init(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Core: renderunit_init: Core:%s\n", core->description));
+
+ _MALI_OSK_INIT_LIST_HEAD(&core->list) ;
+ core->timer = _mali_osk_timer_init();
+ if (NULL == core->timer)
+ {
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot init timer\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ _mali_osk_timer_setcallback(core->timer, mali_core_renderunit_timeout_function, (void *)core);
+
+ core->timer_hang_detection = _mali_osk_timer_init();
+ if (NULL == core->timer_hang_detection)
+ {
+ _mali_osk_timer_term(core->timer);
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot init hang detection timer\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ _mali_osk_timer_setcallback(core->timer_hang_detection, mali_core_renderunit_timeout_function_hang_detection, (void *)core);
+
+#if USING_MALI_PMM
+ /* Init no pending power downs */
+ core->pend_power_down = MALI_FALSE;
+
+ /* Register the core with the PMM - which powers it up */
+ if (_MALI_OSK_ERR_OK != malipmm_core_register( core->pmm_id ))
+ {
+ _mali_osk_timer_term(core->timer);
+ _mali_osk_timer_term(core->timer_hang_detection);
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot register with PMM\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif /* USING_MALI_PMM */
+
+ core->error_recovery = MALI_FALSE;
+ core->in_detach_function = MALI_FALSE;
+ core->state = CORE_IDLE;
+ core->current_job = NULL;
+ core->magic_nr = CORE_MAGIC_NR;
+#if USING_MMU
+ core->mmu = NULL;
+#endif /* USING_MMU */
+
+ MALI_SUCCESS;
+}
+
+void mali_core_renderunit_term(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Core: renderunit_term: Core:%s\n", core->description));
+
+ if (NULL != core->timer)
+ {
+ _mali_osk_timer_term(core->timer);
+ core->timer = NULL;
+ }
+ if (NULL != core->timer_hang_detection)
+ {
+ _mali_osk_timer_term(core->timer_hang_detection);
+ core->timer_hang_detection = NULL;
+ }
+
+#if USING_MALI_PMM
+ /* Unregister the core with the PMM */
+ malipmm_core_unregister( core->pmm_id );
+#endif
+}
+
+/* Used by external renderunit_create<> function */
+_mali_osk_errcode_t mali_core_renderunit_map_registers(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_map_registers: Core:%s\n", core->description)) ;
+ if( (0 == core->registers_base_addr) ||
+ (0 == core->size) ||
+ (NULL == core->description)
+ )
+ {
+ MALI_PRINT_ERROR(("Missing fields in the core structure %u %u 0x%x;\n", core->registers_base_addr, core->size, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(core->registers_base_addr, core->size, core->description))
+ {
+ MALI_PRINT_ERROR(("Could not request register region (0x%08X - 0x%08X) to core: %s\n",
+ core->registers_base_addr, core->registers_base_addr + core->size - 1, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Success: request_mem_region: (0x%08X - 0x%08X) Core:%s\n",
+ core->registers_base_addr, core->registers_base_addr + core->size - 1, core->description));
+ }
+
+ core->registers_mapped = _mali_osk_mem_mapioregion( core->registers_base_addr, core->size, core->description );
+
+ if ( 0 == core->registers_mapped )
+ {
+ MALI_PRINT_ERROR(("Could not ioremap registers for %s .\n", core->description));
+ _mali_osk_mem_unreqregion(core->registers_base_addr, core->size);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Success: ioremap_nocache: Internal ptr: (0x%08X - 0x%08X) Core:%s\n",
+ (u32) core->registers_mapped,
+ ((u32)core->registers_mapped)+ core->size - 1,
+ core->description));
+ }
+
+ MALI_DEBUG_PRINT(4, ("Success: Mapping registers to core: %s\n",core->description));
+
+ MALI_SUCCESS;
+}
+
+/* Used by external renderunit_create<> function + other places */
+void mali_core_renderunit_unmap_registers(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_unmap_registers: Core:%s\n", core->description));
+ if (0 == core->registers_mapped)
+ {
+ MALI_PRINT_ERROR(("Trying to unmap register-mapping with NULL from core: %s\n", core->description));
+ return;
+ }
+ _mali_osk_mem_unmapioregion(core->registers_base_addr, core->size, core->registers_mapped);
+ core->registers_mapped = 0;
+ _mali_osk_mem_unreqregion(core->registers_base_addr, core->size);
+}
+
+static void mali_core_renderunit_irq_handler_remove(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_irq_handler_remove: Core:%s\n", core->description));
+ _mali_osk_irq_term(core->irq);
+}
+
+mali_core_renderunit * mali_core_renderunit_get_mali_core_nr(mali_core_subsystem *subsys, u32 mali_core_nr)
+{
+ mali_core_renderunit * core;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ if (subsys->number_of_cores <= mali_core_nr)
+ {
+ MALI_PRINT_ERROR(("Trying to get illegal mali_core_nr: 0x%x for %s", mali_core_nr, subsys->name));
+ return NULL;
+ }
+ core = (subsys->mali_core_array)[mali_core_nr];
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_get_mali_core_nr: Core:%s\n", core->description));
+ MALI_CHECK_CORE(core);
+ return core;
+}
+
+/* Is used by external function:
+ subsystem_startup<> */
+_mali_osk_errcode_t mali_core_subsystem_init(mali_core_subsystem* new_subsys)
+{
+ int i;
+
+ /* These function pointers must have been set before calling this function */
+ if (
+ ( NULL == new_subsys->name ) ||
+ ( NULL == new_subsys->start_job ) ||
+ ( NULL == new_subsys->irq_handler_upper_half ) ||
+ ( NULL == new_subsys->irq_handler_bottom_half ) ||
+ ( NULL == new_subsys->get_new_job_from_user ) ||
+ ( NULL == new_subsys->return_job_to_user )
+ )
+ {
+ MALI_PRINT_ERROR(("Missing functions in subsystem."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_DEBUG_PRINT(2, ("Core: subsystem_init: %s\n", new_subsys->name)) ;
+
+ /* Catch use-before-initialize/use-after-terminate */
+ MALI_DEBUG_ASSERT_POINTER( rendercores_global_mutex );
+
+ new_subsys->magic_nr = SUBSYSTEM_MAGIC_NR;
+
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->renderunit_idle_head); /* Idle cores of this type */
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->renderunit_off_head); /* Powered off cores of this type */
+
+ /* Linked list, for each priority, of sessions with a job ready for scheduling */
+ for(i=0; i<PRIORITY_LEVELS; ++i)
+ {
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->awaiting_sessions_head[i]);
+ }
+
+ /* Linked list of all sessions connected to this coretype */
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->all_sessions_head);
+
+ MALI_SUCCESS;
+}
+
+#if USING_MMU
+void mali_core_subsystem_attach_mmu(mali_core_subsystem* subsys)
+{
+ u32 i;
+ mali_core_renderunit * core;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ if ( NULL==core ) break;
+ core->mmu = mali_memory_core_mmu_lookup(core->mmu_id);
+ MALI_DEBUG_PRINT(2, ("Attach mmu: 0x%x to core: %s in subsystem: %s\n", core->mmu, core->description, subsys->name));
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+}
+#endif
+
+/* This will register an IRQ handler, and add the core to the list of available cores for this subsystem. */
+_mali_osk_errcode_t mali_core_subsystem_register_renderunit(mali_core_subsystem* subsys, mali_core_renderunit * core)
+{
+ mali_core_renderunit ** mali_core_array;
+ u32 previous_nr;
+ u32 previous_size;
+ u32 new_nr;
+ u32 new_size;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+
+ /* If any of these are 0 there is an error */
+ if(0 == core->subsystem ||
+ 0 == core->registers_base_addr ||
+ 0 == core->size ||
+ 0 == core->description)
+ {
+ MALI_PRINT_ERROR(("Missing fields in the core structure 0x%x 0x%x 0x%x;\n",
+ core->registers_base_addr, core->size, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Core: subsystem_register_renderunit: %s\n", core->description));
+
+ MALI_CHECK_NON_NULL(
+ core->irq = _mali_osk_irq_init(
+ core->irq_nr,
+ mali_core_irq_handler_upper_half,
+ mali_core_irq_handler_bottom_half,
+ (_mali_osk_irq_trigger_t)subsys->probe_core_irq_trigger,
+ (_mali_osk_irq_ack_t)subsys->probe_core_irq_acknowledge,
+ core,
+ "mali_core_irq_handlers"
+ ),
+ _MALI_OSK_ERR_FAULT
+ );
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ /* Update which core number this is */
+ core->core_number = subsys->number_of_cores;
+
+ /* Update the array of cores in the subsystem. */
+ previous_nr = subsys->number_of_cores;
+ previous_size = sizeof(mali_core_renderunit*)*previous_nr;
+ new_nr = previous_nr + 1;
+ new_size = sizeof(mali_core_renderunit*)*new_nr;
+
+ if (0 != previous_nr)
+ {
+ if (NULL == subsys->mali_core_array)
+ {
+ MALI_PRINT_ERROR(("Internal error"));
+ goto exit_function;
+ }
+
+ mali_core_array = (mali_core_renderunit **) _mali_osk_malloc( new_size );
+ if (NULL == mali_core_array )
+ {
+ MALI_PRINT_ERROR(("Out of mem"));
+ err = _MALI_OSK_ERR_NOMEM;
+ goto exit_function;
+ }
+ _mali_osk_memcpy(mali_core_array, subsys->mali_core_array, previous_size);
+ _mali_osk_free( subsys->mali_core_array);
+ MALI_DEBUG_PRINT(5, ("Success: adding a new core to subsystem array %s\n", core->description) ) ;
+ }
+ else
+ {
+ mali_core_array = (mali_core_renderunit **) _mali_osk_malloc( new_size );
+ if (NULL == mali_core_array )
+ {
+ MALI_PRINT_ERROR(("Out of mem"));
+ err = _MALI_OSK_ERR_NOMEM;
+ goto exit_function;
+ }
+ MALI_DEBUG_PRINT(6, ("Success: adding first core to subsystem array %s\n", core->description) ) ;
+ }
+ subsys->mali_core_array = mali_core_array;
+ mali_core_array[previous_nr] = core;
+
+ /* Add the core to the list of available cores on the system */
+ _mali_osk_list_add(&(core->list), &(subsys->renderunit_idle_head));
+
+ /* Update total number of cores */
+ subsys->number_of_cores = new_nr;
+ MALI_DEBUG_PRINT(6, ("Success: mali_core_subsystem_register_renderunit %s\n", core->description));
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_SUCCESS;
+
+exit_function:
+ mali_core_renderunit_irq_handler_remove(core);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_ERROR(err);
+}
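+
+/*
+ * The array growth above is a manual "realloc" - allocate the larger block,
+ * copy the old contents, free the old block - presumably because the OSK
+ * layer exposes no realloc. A minimal sketch of the same pattern:
+ */
+#if 0 /* example only */
+static mali_core_renderunit ** example_grow_core_array(mali_core_renderunit **old, u32 old_nr)
+{
+ mali_core_renderunit **grown = _mali_osk_malloc(sizeof(mali_core_renderunit*) * (old_nr + 1));
+ if (NULL == grown) return NULL; /* caller keeps the old array */
+ _mali_osk_memcpy(grown, old, sizeof(mali_core_renderunit*) * old_nr);
+ _mali_osk_free(old);
+ return grown;
+}
+#endif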
+
+
+/**
+ * Called by the core when a system info update is needed.
+ * We fill in info about all the core types available.
+ * @param subsys Pointer to the core's @a mali_core_subsystem data structure
+ * @param info Pointer to system info struct to update
+ * @return _MALI_OSK_ERR_OK on success, or another _mali_osk_errcode_t error code on failure
+ */
+_mali_osk_errcode_t mali_core_subsystem_system_info_fill(mali_core_subsystem* subsys, _mali_system_info* info)
+{
+ u32 i;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK; /* OK if no cores to update info for */
+ mali_core_renderunit * core;
+ _mali_core_info **core_info_nextp;
+ _mali_core_info * cinfo;
+
+ MALI_DEBUG_PRINT(4, ("mali_core_subsystem_system_info_fill: %s\n", subsys->name) ) ;
+
+ /* check input */
+ MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ core_info_nextp = &(info->core_info);
+ cinfo = info->core_info;
+
+ while(NULL!=cinfo)
+ {
+ core_info_nextp = &(cinfo->next);
+ cinfo = cinfo->next;
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ if ( NULL==core )
+ {
+ err = _MALI_OSK_ERR_FAULT;
+ goto early_exit;
+ }
+ cinfo = (_mali_core_info *)_mali_osk_calloc(1, sizeof(_mali_core_info));
+ if ( NULL==cinfo )
+ {
+ err = _MALI_OSK_ERR_NOMEM;
+ goto early_exit;
+ }
+ cinfo->version = core->core_version;
+ cinfo->type =subsys->core_type;
+ cinfo->reg_address = core->registers_base_addr;
+ cinfo->core_nr = i;
+ cinfo->next = NULL;
+ /* Link the new node in through the previous node's next pointer */
+ *core_info_nextp = cinfo;
+ /* Advance the tail cursor to this node's next field */
+ core_info_nextp = &(cinfo->next);
+ }
+early_exit:
+ if ( _MALI_OSK_ERR_OK != err) MALI_PRINT_ERROR(("Error: In mali_core_subsystem_system_info_fill %d\n", err));
+ MALI_DEBUG_CODE(
+ cinfo = info->core_info;
+
+ MALI_DEBUG_PRINT(3, ("Current list of cores\n"));
+ while( NULL != cinfo )
+ {
+ MALI_DEBUG_PRINT(3, ("Type: 0x%x\n", cinfo->type));
+ MALI_DEBUG_PRINT(3, ("Version: 0x%x\n", cinfo->version));
+ MALI_DEBUG_PRINT(3, ("Reg_addr: 0x%x\n", cinfo->reg_address));
+ MALI_DEBUG_PRINT(3, ("Core_nr: 0x%x\n", cinfo->core_nr));
+ MALI_DEBUG_PRINT(3, ("Flags: 0x%x\n", cinfo->flags));
+ MALI_DEBUG_PRINT(3, ("*****\n"));
+ cinfo = cinfo->next;
+ }
+ );
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_ERROR(err);
+}
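+
+/*
+ * Sketch of the list idiom used above, not compiled into the driver:
+ * core_info_nextp is a "pointer to the next pointer" tail cursor. It starts
+ * at &info->core_info, is walked to the terminating NULL, and each append
+ * writes the new node through it, so the empty and non-empty cases need no
+ * special-casing:
+ */
+#if 0 /* example only */
+static void example_append_core_info(_mali_core_info **list_head, _mali_core_info *node)
+{
+ _mali_core_info **nextp = list_head;
+ while (NULL != *nextp) nextp = &((*nextp)->next); /* find the tail */
+ node->next = NULL;
+ *nextp = node; /* link in place */
+}
+#endif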
+
+
+/* Is used by external function:
+ subsystem_terminate<> */
+void mali_core_subsystem_cleanup(mali_core_subsystem* subsys)
+{
+ u32 i;
+ mali_core_renderunit * core;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ MALI_DEBUG_PRINT(2, ("Core: subsystem_cleanup: %s\n", subsys->name )) ;
+
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+#if USING_MMU
+ if (NULL != core->mmu)
+ {
+ /* the MMU is attached in the load_complete callback, which will never be called if the module fails to load, handle that case */
+ mali_memory_core_mmu_unregister_callback(core->mmu, mali_core_subsystem_callback_schedule_wrapper);
+ }
+#endif
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ mali_core_renderunit_irq_handler_remove(core);
+
+ /* When a process terminates, all cores running jobs from that process are reset and put to idle.
+ That means that when the module is unloading (this code) we are guaranteed that all cores are idle.
+ However: if something (we can't think of what) is really wrong, a core may raise an interrupt during this
+ unloading, and we may then have bottom-half processing pending from the interrupts
+ we deregistered above. To be sure that the bottom halves do not access the structures after they
+ are deallocated, we flush the bottom-half processing here, before the deallocation. */
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+#if USING_MALI_PMM
+ /* Only reset when we are using PMM and the core is not off */
+#if MALI_PMM_NO_PMU
+ /* We need to reset when there is no PMU - but the reset will
+ * cause the register read/write functions to report an error
+ * (hence the CORE_OFF check below), so we change state first to
+ * allow the reset to happen.
+ */
+ core->state = CORE_IDLE;
+#endif
+ if( core->state != CORE_OFF )
+ {
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_DISABLE );
+ }
+#else
+ /* Always reset the core */
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_DISABLE );
+#endif
+
+ mali_core_renderunit_unmap_registers(core);
+
+ _mali_osk_list_delinit(&core->list);
+
+ mali_core_renderunit_term(core);
+
+ subsys->renderunit_delete(core);
+ }
+
+ mali_core_subsystem_cleanup_all_renderunits(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT(6, ("SUCCESS: mali_core_subsystem_cleanup: %s\n", subsys->name )) ;
+}
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_number_of_cores_get(mali_core_session * session, u32 *number_of_cores)
+{
+ mali_core_subsystem * subsystem;
+
+ subsystem = session->subsystem;
+ if ( NULL != number_of_cores )
+ {
+ *number_of_cores = subsystem->number_of_cores;
+ /* Print inside the NULL check so we never dereference a NULL pointer */
+ MALI_DEBUG_PRINT(4, ("Core: ioctl_number_of_cores_get: %s: %u\n", subsystem->name, *number_of_cores) ) ;
+ }
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_start_job(mali_core_session * session, void *job_data)
+{
+ mali_core_subsystem * subsystem;
+ _mali_osk_errcode_t err;
+
+ /* need the subsystem to run callback function */
+ subsystem = session->subsystem;
+ MALI_CHECK_NON_NULL(subsystem, _MALI_OSK_ERR_FAULT);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ err = subsystem->get_new_job_from_user(session, job_data);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_ERROR(err);
+}
+
+
+/* We return the version number to the first core in this subsystem */
+_mali_osk_errcode_t mali_core_subsystem_ioctl_core_version_get(mali_core_session * session, _mali_core_version *version)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit * core0;
+ u32 nr_return;
+
+ subsystem = session->subsystem;
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+
+ core0 = mali_core_renderunit_get_mali_core_nr(subsystem, 0);
+
+ if( NULL == core0 )
+ {
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ nr_return = core0->core_version;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_DEBUG_PRINT(4, ("Core: ioctl_core_version_get: %s: %u\n", subsystem->name, nr_return )) ;
+
+ *version = nr_return;
+
+ MALI_SUCCESS;
+}
+
+void mali_core_subsystem_ioctl_abort_job(mali_core_session * session, u32 id)
+{
+ find_and_abort(session, id);
+}
+
+static mali_bool job_should_be_aborted(mali_core_job *job, u32 abort_id)
+{
+ if ( job->abort_id == abort_id ) return MALI_TRUE;
+ else return MALI_FALSE;
+}
+
+static void find_and_abort(mali_core_session* session, u32 abort_id)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit *core;
+ mali_core_renderunit *tmp;
+ mali_core_job *job;
+
+ subsystem = session->subsystem;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+ job = session->job_waiting_to_run;
+ if ( (job!=NULL) && job_should_be_aborted (job, abort_id) )
+ {
+ MALI_DEBUG_PRINT(3, ("Core: Aborting %s job, with id nr: %u, from the waiting_to_run slot.\n", subsystem->name, abort_id ));
+ session->job_waiting_to_run = NULL;
+ _mali_osk_list_delinit(&(session->awaiting_sessions_list));
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ subsystem->return_job_to_user( job , JOB_STATUS_END_ABORT);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY( core, tmp, &session->renderunits_working_head, mali_core_renderunit, list )
+ {
+ job = core->current_job;
+ if ( (job!=NULL) && (job_should_be_aborted (job, abort_id) ) )
+ {
+ MALI_DEBUG_PRINT(3, ("Core: Aborting %s job, with id nr: %u, which is currently running on mali.\n", subsystem->name, abort_id ));
+ if ( core->state==CORE_IDLE )
+ {
+ MALI_PRINT_ERROR(("Aborting core with running job which is idle. Must be something very wrong."));
+ goto end_bug;
+ }
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_ABORT);
+ }
+ }
+end_bug:
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+}
+
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_suspend_response(mali_core_session * session, void *argument)
+{
+ mali_core_subsystem * subsystem;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+
+ /* need the subsystem to run callback function */
+ subsystem = session->subsystem;
+ MALI_CHECK_NON_NULL(subsystem, _MALI_OSK_ERR_FAULT);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ if ( NULL != subsystem->suspend_response)
+ {
+ MALI_DEBUG_PRINT(4, ("MALI_IOC_CORE_CMD_SUSPEND_RESPONSE start\n"));
+ err = subsystem->suspend_response(session, argument);
+ MALI_DEBUG_PRINT(4, ("MALI_IOC_CORE_CMD_SUSPEND_RESPONSE end\n"));
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ return err;
+}
+
+
+/* Is used by internal function:
+ mali_core_subsystem_cleanup<>s */
+/* All cores should be removed before calling this function.
+Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_cleanup_all_renderunits(mali_core_subsystem* subsys)
+{
+ int i;
+ _mali_osk_free(subsys->mali_core_array);
+ subsys->number_of_cores = 0;
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_cleanup_all_renderunits: %s\n", subsys->name) ) ;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+
+ if ( ! _mali_osk_list_empty(&(subsys->renderunit_idle_head)))
+ {
+ MALI_PRINT_ERROR(("List renderunit_list_idle should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->renderunit_idle_head)) ;
+ }
+
+ if ( ! _mali_osk_list_empty(&(subsys->renderunit_off_head)))
+ {
+ MALI_PRINT_ERROR(("List renderunit_list_off should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->renderunit_off_head)) ;
+ }
+
+ for(i=0; i<PRIORITY_LEVELS; ++i)
+ {
+ if ( ! _mali_osk_list_empty(&(subsys->awaiting_sessions_head[i])))
+ {
+ MALI_PRINT_ERROR(("List awaiting_sessions_linkedlist should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->awaiting_sessions_head[i])) ;
+ subsys->awaiting_sessions_sum_all_priorities = 0;
+ }
+ }
+
+ if ( ! _mali_osk_list_empty(&(subsys->all_sessions_head)))
+ {
+ MALI_PRINT_ERROR(("List all_sessions_linkedlist should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->all_sessions_head)) ;
+ }
+}
+
+/* Is used by internal functions:
+ mali_core_irq_handler_bottom_half<>;
+ mali_core_subsystem_schedule<>; */
+/* Will release the core.*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_core_set_idle(mali_core_renderunit *core)
+{
+ mali_core_subsystem *subsystem;
+#if USING_MALI_PMM
+ mali_core_status oldstatus;
+#endif
+ subsystem = core->subsystem;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ _mali_osk_timer_del(core->timer);
+ _mali_osk_timer_del(core->timer_hang_detection);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_core_set_idle: %s\n", core->description) ) ;
+
+ core->current_job = NULL ;
+
+#if USING_MALI_PMM
+
+ oldstatus = core->state;
+
+ if( core->pend_power_down )
+ {
+ core->state = CORE_OFF ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_off_head );
+ /* Done the move from the active queues, so the pending power down can be done */
+ core->pend_power_down = MALI_FALSE;
+ malipmm_core_power_down_okay( core->pmm_id );
+ }
+ else
+ {
+ core->state = CORE_IDLE ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_idle_head );
+ }
+
+ if( CORE_OFF != oldstatus )
+ {
+ /* Message that this core is now idle or in fact off */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_FINISHED,
+ 0 };
+ event.data = core->pmm_id;
+ _mali_ukk_pmm_event_message( &event );
+#if USING_MMU
+ /* Only free the reference when entering idle state from
+ * anything other than power off
+ */
+ mali_memory_core_mmu_release_address_space_reference(core->mmu);
+#endif /* USING_MMU */
+ }
+
+
+#else /* !USING_MALI_PMM */
+
+ core->state = CORE_IDLE ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_idle_head );
+
+#if USING_MMU
+ mali_memory_core_mmu_release_address_space_reference(core->mmu);
+#endif
+
+#endif /* USING_MALI_PMM */
+}
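+
+/*
+ * Power-down handshake, as implemented above (PMM builds): the PMM sets
+ * core->pend_power_down while the core is still busy; when the job ends the
+ * core passes through here, moves itself onto the off-list first, and only
+ * then acknowledges with malipmm_core_power_down_okay(). The core is thus
+ * never powered down while a job is attached to it.
+ */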
+
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_set_working(mali_core_renderunit *core, mali_core_job *job)
+{
+ mali_core_subsystem *subsystem;
+ mali_core_session *session;
+
+ session = job->session;
+ subsystem = core->subsystem;
+
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_JOB(job);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_set_working: %s\n", core->description) ) ;
+
+ core->current_job = job ;
+ core->state = CORE_WORKING ;
+ job->start_time_jiffies = _mali_osk_time_tickcount();
+ _mali_osk_list_move( &core->list, &session->renderunits_working_head );
+
+}
+
+#if USING_MALI_PMM
+
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_core_set_off(mali_core_renderunit *core)
+{
+ mali_core_subsystem *subsystem;
+ subsystem = core->subsystem;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ /* Cores must be idle before powering off */
+ MALI_DEBUG_ASSERT(core->state == CORE_IDLE);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_core_set_off: %s\n", core->description) ) ;
+
+ core->current_job = NULL ;
+ core->state = CORE_OFF ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_off_head );
+}
+
+#endif /* USING_MALI_PMM */
+
+/* Is used by internal function:
+ mali_core_subsystem_schedule<>; */
+/* Returns the job with the highest priority for the subsystem. NULL if none*/
+/* Must hold subsystem_mutex before entering this function */
+static mali_core_session * mali_core_subsystem_get_waiting_session(mali_core_subsystem *subsystem)
+{
+ int i;
+
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ if ( 0 == subsystem->awaiting_sessions_sum_all_priorities )
+ {
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_get_waiting_job: No awaiting session found\n"));
+ return NULL;
+ }
+
+ for( i=0; i<PRIORITY_LEVELS ; ++i)
+ {
+ if (!_mali_osk_list_empty(&subsystem->awaiting_sessions_head[i]))
+ {
+ return _MALI_OSK_LIST_ENTRY(subsystem->awaiting_sessions_head[i].next, mali_core_session, awaiting_sessions_list);
+ }
+ }
+
+ return NULL;
+}
+
+static mali_core_job * mali_core_subsystem_release_session_get_job(mali_core_subsystem *subsystem, mali_core_session * session)
+{
+ mali_core_job *job;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ _mali_osk_list_delinit(&session->awaiting_sessions_list);
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ job = session->job_waiting_to_run;
+ session->job_waiting_to_run = NULL;
+ MALI_CHECK_JOB(job);
+ return job;
+}
+
+/* Is used by internal functions:
+ mali_core_subsystem_schedule<> */
+/* This will start the job on the core. It will also release the core if it did not start.*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_job_start_on_core(mali_core_job *job, mali_core_renderunit *core)
+{
+ mali_core_session *session;
+ mali_core_subsystem *subsystem;
+ _mali_osk_errcode_t err;
+ session = job->session;
+ subsystem = core->subsystem;
+
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_JOB(job);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_CHECK_SESSION(session);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(4, ("Core: job_start_on_core: job=0x%x, session=0x%x, core=%s\n", job, session, core->description));
+
+ MALI_DEBUG_ASSERT(NULL == core->current_job) ;
+ MALI_DEBUG_ASSERT(CORE_IDLE == core->state );
+
+ mali_core_subsystem_move_set_working(core, job);
+
+#if defined USING_MALI400_L2_CACHE
+ /* Invalidate the L2 cache */
+ if (_MALI_OSK_ERR_OK != mali_kernel_l2_cache_invalidate_all() )
+ {
+ MALI_DEBUG_PRINT(4, ("Core: Clear of L2 failed, return job. System may not be usable for some reason.\n"));
+ mali_core_subsystem_move_core_set_idle(core);
+ subsystem->return_job_to_user(job,JOB_STATUS_END_SYSTEM_UNUSABLE );
+ return;
+ }
+#endif
+
+ /* Tries to start job on the core. Returns MALI_FALSE if the job could not be started */
+ err = subsystem->start_job(job, core);
+
+#if MALI_GPU_UTILIZATION
+ mali_utilization_core_start();
+#endif
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ /* This will happen only if there is something in the job object
+ which makes it impossible to start, e.g. if it requires illegal memory.*/
+ MALI_DEBUG_PRINT(4, ("Core: start_job failed, returning job and putting core back into idle list\n"));
+ mali_core_subsystem_move_core_set_idle(core);
+ subsystem->return_job_to_user(job,JOB_STATUS_END_ILLEGAL_JOB );
+ }
+ else
+ {
+ u32 delay = _mali_osk_time_mstoticks(job->watchdog_msecs)+1;
+ job->watchdog_jiffies = _mali_osk_time_tickcount() + delay;
+ if (mali_benchmark)
+ {
+ _mali_osk_timer_add(core->timer, 1);
+ }
+ else
+ {
+ _mali_osk_timer_add(core->timer, delay);
+ }
+ }
+}
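+
+/*
+ * Worked example for the watchdog arming above (assuming a 100 Hz tick,
+ * i.e. 10 ms per jiffy - the actual rate is platform dependent): with
+ * job->watchdog_msecs == 900000 (WATCHDOG_MSECS_DEFAULT), delay becomes
+ * _mali_osk_time_mstoticks(900000) + 1 == 90001 ticks; the +1 rounds up so
+ * sub-tick truncation can never time a job out early. In benchmark mode the
+ * timer instead fires on the very next tick, and the timeout function marks
+ * the core CORE_POLL rather than CORE_WATCHDOG_TIMEOUT.
+ */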
+
+#if USING_MMU
+static void mali_core_subsystem_callback_schedule_wrapper(void* sub)
+{
+ mali_core_subsystem * subsystem;
+ subsystem = (mali_core_subsystem *)sub;
+ MALI_DEBUG_PRINT(3, ("MMU: Is schedulling subsystem: %s\n", subsystem->name));
+ mali_core_subsystem_schedule(subsystem);
+}
+#endif
+
+/* Is used by internal function:
+ mali_core_irq_handler_bottom_half
+ mali_core_session_add_job
+*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_schedule(mali_core_subsystem * subsystem)
+{
+ mali_core_renderunit *core, *tmp;
+ mali_core_session *session;
+ mali_core_job *job;
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_schedule: %s\n", subsystem->name )) ;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ /* First check that there are sessions with jobs waiting to run */
+ if ( 0 == subsystem->awaiting_sessions_sum_all_priorities)
+ {
+ MALI_DEBUG_PRINT(6, ("Core: No jobs available for %s\n", subsystem->name) ) ;
+ return;
+ }
+
+ /* Returns the session with the highest priority job for the subsystem. NULL if none*/
+ session = mali_core_subsystem_get_waiting_session(subsystem);
+
+ if (NULL == session)
+ {
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: No runnable job found\n"));
+ return;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp, &subsystem->renderunit_idle_head, mali_core_renderunit, list)
+ {
+#if USING_MMU
+ int err = mali_memory_core_mmu_activate_page_table(core->mmu, session->mmu_session, mali_core_subsystem_callback_schedule_wrapper, subsystem);
+ if (0 == err)
+ {
+ /* core points to a core where the MMU page table activation succeeded */
+#endif
+ /* This will remove the job from queue system */
+ job = mali_core_subsystem_release_session_get_job(subsystem, session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: Got a job 0x%x\n", job));
+
+#if USING_MALI_PMM
+ {
+ /* Message that there is a job scheduled to run
+ * NOTE: mali_core_job_start_on_core() can fail to start
+ * the job for several reasons, but it will move the core
+ * back to idle which will create the FINISHED message
+ * so we can still say that the job is SCHEDULED
+ */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_SCHEDULED,
+ 0 };
+ event.data = core->pmm_id;
+ _mali_ukk_pmm_event_message( &event );
+ }
+#endif
+ /* This will {remove core from freelist AND start the job on the core}*/
+ mali_core_job_start_on_core(job, core);
+
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: Job started, done\n"));
+ return;
+#if USING_MMU
+ }
+#endif
+ }
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: Could not activate MMU. Scheduelling postponed to MMU, checking next.\n"));
+
+#if USING_MALI_PMM
+ {
+ /* Message that there are jobs to run */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_QUEUED,
+ 0 };
+ if( subsystem->core_type == _MALI_GP2 || subsystem->core_type == _MALI_400_GP )
+ {
+ event.data = MALI_PMM_CORE_GP;
+ }
+ else
+ {
+ /* Check the PP is supported by the PMM */
+ MALI_DEBUG_ASSERT( subsystem->core_type == _MALI_200 || subsystem->core_type == _MALI_400_PP );
+ /* We state that all PP cores are scheduled to inform the PMM
+ * that it may need to power something up!
+ */
+ event.data = MALI_PMM_CORE_PP_ALL;
+ }
+ _mali_ukk_pmm_event_message( &event );
+ }
+#endif /* USING_MALI_PMM */
+
+}
+
+/* Is used by external function:
+ session_begin<> */
+void mali_core_session_begin(mali_core_session * session)
+{
+ mali_core_subsystem * subsystem;
+
+ subsystem = session->subsystem;
+ if ( NULL == subsystem )
+ {
+ MALI_PRINT_ERROR(("Missing data in struct\n"));
+ return;
+ }
+ MALI_DEBUG_PRINT(2, ("Core: session_begin: for %s\n", session->subsystem->name )) ;
+
+ session->magic_nr = SESSION_MAGIC_NR;
+
+ _MALI_OSK_INIT_LIST_HEAD(&session->renderunits_working_head);
+
+ session->job_waiting_to_run = NULL;
+ _MALI_OSK_INIT_LIST_HEAD(&session->awaiting_sessions_list);
+ _MALI_OSK_INIT_LIST_HEAD(&session->all_sessions_list);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ _mali_osk_list_add(&session->all_sessions_list, &session->subsystem->all_sessions_head);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: session_begin: for %s DONE\n", session->subsystem->name) ) ;
+}
+
+#if USING_MMU
+static void mali_core_renderunit_stop_bus(mali_core_renderunit* core)
+{
+ core->subsystem->stop_bus(core);
+}
+#endif
+
+void mali_core_session_close(mali_core_session * session)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit *core;
+
+ subsystem = session->subsystem;
+ MALI_DEBUG_ASSERT_POINTER(subsystem);
+
+ MALI_DEBUG_PRINT(2, ("Core: session_close: for %s\n", session->subsystem->name) ) ;
+
+ /* We must grab subsystem mutex since the list this session belongs to
+ is owned by the subsystem */
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+ /* Return the potential waiting job to user */
+ if ( session->job_waiting_to_run )
+ {
+ subsystem->return_job_to_user( session->job_waiting_to_run, JOB_STATUS_END_SHUTDOWN );
+ session->job_waiting_to_run = NULL;
+ _mali_osk_list_delinit(&(session->awaiting_sessions_list));
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ }
+
+ /* Kill active cores working for this session, freeing their jobs.
+ Since handling one core can also stop jobs on other cores, we rescan the list from the head after each detach */
+ while ( ! _mali_osk_list_empty(&session->renderunits_working_head) )
+ {
+ core = _MALI_OSK_LIST_ENTRY(session->renderunits_working_head.next, mali_core_renderunit, list);
+ MALI_DEBUG_PRINT(3, ("Core: session_close: Core was working: %s\n", core->description )) ;
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_SHUTDOWN );
+ }
+ _MALI_OSK_INIT_LIST_HEAD(&session->renderunits_working_head); /* Not necessary - we will _mali_osk_free session*/
+
+ /* Remove this session from the global sessionlist */
+ _mali_osk_list_delinit(&session->all_sessions_list);
+
+ MALI_DEBUG_PRINT(5, ("Core: session_close: for %s FINISHED\n", session->subsystem->name )) ;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+}
+
+/* Must hold subsystem_mutex before entering this function */
+_mali_osk_errcode_t mali_core_session_add_job(mali_core_session * session, mali_core_job *job, mali_core_job **job_return)
+{
+ mali_core_subsystem * subsystem;
+
+ job->magic_nr = JOB_MAGIC_NR;
+ MALI_CHECK_SESSION(session);
+
+ subsystem = session->subsystem;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: session_add_job: for %s\n", subsystem->name )) ;
+
+ /* Setting the default value; No job to return */
+ MALI_DEBUG_ASSERT_POINTER(job_return);
+ *job_return = NULL;
+
+ if ( NULL != session->job_waiting_to_run)
+ {
+ MALI_DEBUG_PRINT(5, ("The session already had a job waiting\n")) ;
+ /* Checking whether the new job has a higher priority than the one that was pending.*/
+ if ( job_has_higher_priority(job,session->job_waiting_to_run))
+ {
+ /* Remove this session from current priority */
+ _mali_osk_list_del( &(session->awaiting_sessions_list));
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ /* Returning the previous waiting job through the input double pointer*/
+ *job_return = session->job_waiting_to_run;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Illegal internal state."));
+ /* There was a job waiting in this session, and the job we tried to add
+ did NOT have higher priority. Callers check before calling this function
+ that we never get into this state, so return an error to indicate that
+ the new job was NOT enqueued.*/
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ }
+ /* Continue to add the new job as the next job from this session */
+ MALI_DEBUG_PRINT(6, ("Core: session_add_job job=0x%x\n", job));
+
+ /* Adding this session to the subsystem list of sessions with pending job, with priority */
+ session->job_waiting_to_run = job;
+
+ _mali_osk_list_addtail( &(session->awaiting_sessions_list), &(subsystem->awaiting_sessions_head[job->priority]));
+ subsystem->awaiting_sessions_sum_all_priorities++;
+
+ mali_core_subsystem_schedule(subsystem);
+
+ MALI_DEBUG_PRINT(6, ("Core: session_add_job: for %s FINISHED\n", session->subsystem->name )) ;
+
+ MALI_SUCCESS;
+}
+
+static void mali_core_job_set_run_time(mali_core_job * job)
+{
+ u32 jiffies_used;
+ jiffies_used = _mali_osk_time_tickcount() - job->start_time_jiffies;
+ if ( jiffies_used > JOB_MAX_JIFFIES )
+ {
+ MALI_PRINT_ERROR(("Job used too many jiffies: %d\n", jiffies_used ));
+ jiffies_used = 0;
+ }
+ job->render_time_msecs = _mali_osk_time_tickstoms(jiffies_used);
+}
+
+static void mali_core_renderunit_detach_job_from_core(mali_core_renderunit* core, mali_subsystem_reschedule_option reschedule, mali_subsystem_job_end_code end_status)
+{
+ mali_core_job * job;
+ mali_core_subsystem * subsystem;
+ mali_bool already_in_detach_function;
+
+ job = core->current_job;
+ subsystem = core->subsystem;
+ MALI_DEBUG_ASSERT(CORE_IDLE != core->state);
+
+ /* The reset_core() called some lines below might call this detach
+ * function again. To protect the core object from being modified by
+ * recursive calls, in_detach_function tracks whether this is a recursive call
+ */
+ already_in_detach_function = core->in_detach_function;
+
+ if ( MALI_FALSE == already_in_detach_function )
+ {
+ core->in_detach_function = MALI_TRUE;
+ if ( NULL != job )
+ {
+ mali_core_job_set_run_time(job);
+ core->current_job = NULL;
+ }
+ }
+
+ if (JOB_STATUS_END_SEG_FAULT == end_status)
+ {
+ subsystem->reset_core( core, MALI_CORE_RESET_STYLE_HARD );
+ }
+ else
+ {
+ subsystem->reset_core( core, MALI_CORE_RESET_STYLE_RUNABLE );
+ }
+
+ if ( MALI_FALSE == already_in_detach_function )
+ {
+ if ( CORE_IDLE != core->state )
+ {
+ #if MALI_GPU_UTILIZATION
+ mali_utilization_core_end();
+ #endif
+ mali_core_subsystem_move_core_set_idle(core);
+ }
+
+ core->in_detach_function = MALI_FALSE;
+
+ if ( SUBSYSTEM_RESCHEDULE == reschedule )
+ {
+ mali_core_subsystem_schedule(subsystem);
+ }
+ if ( NULL != job )
+ {
+ core->subsystem->return_job_to_user(job, end_status);
+ }
+ }
+}
+
+#if USING_MMU
+/* This function intentionally does not release the semaphore. You must run
+ stop_bus_for_all_cores_on_mmu(), reset_all_cores_on_mmu() and continue_job_handling()
+ after calling this function, and then call unlock_subsystem() to release the
+ semaphore. */
+
+static void lock_subsystem(struct mali_core_subsystem * subsys)
+{
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex.
+
+ This function only stops cores behind the given MMU, unless "mmu" is NULL, in
+ which case all cores are stopped.
+*/
+static void stop_bus_for_all_cores_on_mmu(struct mali_core_subsystem * subsys, void* mmu)
+{
+ u32 i;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_DEBUG_PRINT(2,("Handling: bus stop %s\n", subsys->name ));
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+ /* We stop only cores behind the given MMU, unless MMU is NULL */
+ if ( (NULL!=mmu) && (core->mmu != mmu) ) continue;
+
+ if ( CORE_IDLE != core->state )
+ {
+ MALI_DEBUG_PRINT(4, ("Stopping bus on core %s\n", core->description));
+ mali_core_renderunit_stop_bus(core);
+ core->error_recovery = MALI_TRUE;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4,("Core: not active %s\n", core->description ));
+ }
+ }
+ /* Mutex is still being held, to prevent anything from happening while we do cleanup */
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex.
+
+ This function only resets cores behind the given MMU, unless "mmu" is NULL, in
+ which case all cores are reset.
+*/
+static void reset_all_cores_on_mmu(struct mali_core_subsystem * subsys, void* mmu)
+{
+ u32 i;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_DEBUG_PRINT(3, ("Handling: reset cores from mmu: 0x%x on %s\n", mmu, subsys->name ));
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+ /* We reset only cores behind the given MMU, unless MMU is NULL */
+ if ( (NULL!=mmu) && (core->mmu != mmu) ) continue;
+
+ if ( CORE_IDLE != core->state )
+ {
+ MALI_DEBUG_PRINT(4, ("Abort and reset core: %s\n", core->description ));
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_WAIT, JOB_STATUS_END_SEG_FAULT);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4, ("Core: not active %s\n", core->description ));
+ }
+ }
+ MALI_DEBUG_PRINT(4, ("Handling: done %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex. */
+static void continue_job_handling(struct mali_core_subsystem * subsys)
+{
+ u32 i, j;
+
+ MALI_DEBUG_PRINT(3, ("Handling: Continue: %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+
+
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ core->error_recovery = MALI_FALSE;
+ }
+
+ i = subsys->number_of_cores;
+ j = subsys->awaiting_sessions_sum_all_priorities;
+
+ /* Schedule MIN(nr_waiting_jobs, number of cores) times */
+ while( i-- && j--)
+ {
+ mali_core_subsystem_schedule(subsys);
+ }
+ MALI_DEBUG_PRINT(4, ("Handling: done %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* Unlock the subsystem. */
+static void unlock_subsystem(struct mali_core_subsystem * subsys)
+{
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+}
+
+void mali_core_subsystem_broadcast_notification(struct mali_core_subsystem * subsys, mali_core_notification_message message, u32 data)
+{
+ void * mmu;
+ mmu = (void*) data;
+
+ switch(message)
+ {
+ case MMU_KILL_STEP0_LOCK_SUBSYSTEM:
+ break;
+ case MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES:
+ stop_bus_for_all_cores_on_mmu(subsys, mmu);
+ break;
+ case MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS:
+ reset_all_cores_on_mmu(subsys, mmu );
+ break;
+ case MMU_KILL_STEP3_CONTINUE_JOB_HANDLING:
+ continue_job_handling(subsys);
+ break;
+ case MMU_KILL_STEP4_UNLOCK_SUBSYSTEM:
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Illegal message: 0x%x, data: 0x%x\n", (u32)message, data));
+ break;
+ }
+}
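+
+/* For reference, a minimal sketch of the sequence these steps assume
+ * (each step is driven by one broadcast message; this is an illustration
+ * of the protocol above, not an additional code path):
+ *
+ *   lock_subsystem(subsys);
+ *   stop_bus_for_all_cores_on_mmu(subsys, mmu);
+ *   reset_all_cores_on_mmu(subsys, mmu);
+ *   continue_job_handling(subsys);
+ *   unlock_subsystem(subsys);
+ */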
+#endif /* USING_MMU */
+
+void job_watchdog_set(mali_core_job * job, u32 watchdog_msecs)
+{
+ if (watchdog_msecs == 0) job->watchdog_msecs = mali_max_job_runtime; /* use the default */
+ else if (watchdog_msecs > WATCHDOG_MSECS_MAX) job->watchdog_msecs = WATCHDOG_MSECS_MAX; /* no larger than max */
+ else if (watchdog_msecs < WATCHDOG_MSECS_MIN) job->watchdog_msecs = WATCHDOG_MSECS_MIN; /* not below min */
+ else job->watchdog_msecs = watchdog_msecs;
+}
+
+u32 mali_core_hang_check_timeout_get(void)
+{
+ /* check the value. The user might have set the value outside the allowed range */
+ if (mali_hang_check_interval > HANG_CHECK_MSECS_MAX) mali_hang_check_interval = HANG_CHECK_MSECS_MAX; /* cap to max */
+ else if (mali_hang_check_interval < HANG_CHECK_MSECS_MIN) mali_hang_check_interval = HANG_CHECK_MSECS_MIN; /* cap to min */
+
+ /* return the active value */
+ return mali_hang_check_interval;
+}
+
+static _mali_osk_errcode_t mali_core_irq_handler_upper_half (void * data)
+{
+ mali_core_renderunit *core;
+ u32 has_pending_irq;
+
+ core = (mali_core_renderunit * )data;
+
+ if ( (NULL == core) ||
+ (NULL == core->subsystem) ||
+ (NULL == core->subsystem->irq_handler_upper_half) )
+ {
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(core->subsystem);
+
+ has_pending_irq = core->subsystem->irq_handler_upper_half(core);
+
+ if ( has_pending_irq )
+ {
+ _mali_osk_irq_schedulework( core->irq ) ;
+ MALI_SUCCESS;
+ }
+
+ if (mali_benchmark) MALI_SUCCESS;
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+static void mali_core_irq_handler_bottom_half ( void *data )
+{
+ mali_core_renderunit *core;
+ mali_core_subsystem* subsystem;
+
+ mali_subsystem_job_end_code job_status;
+
+ core = (mali_core_renderunit * )data;
+
+ MALI_CHECK_CORE(core);
+ subsystem = core->subsystem;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+ if ( CORE_IDLE == core->state ) goto end_function;
+
+ MALI_DEBUG_PRINT(5, ("IRQ: handling irq from core %s\n", core->description )) ;
+
+ _mali_osk_cache_flushall();
+
+ /* This function must also update the job status flag */
+ job_status = subsystem->irq_handler_bottom_half( core );
+
+ /* Any status other than JOB_STATUS_CONTINUE_RUN means the job is finished. */
+ if ( JOB_STATUS_CONTINUE_RUN != job_status )
+ {
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, job_status);
+ }
+ else
+ {
+ switch ( core->state )
+ {
+ case CORE_WATCHDOG_TIMEOUT:
+ MALI_DEBUG_PRINT(2, ("Watchdog SW Timeout of job from core: %s\n", core->description ));
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_TIMEOUT_SW );
+ break;
+
+ case CORE_POLL:
+ MALI_DEBUG_PRINT(5, ("Poll core: %s\n", core->description )) ;
+ core->state = CORE_WORKING;
+ _mali_osk_timer_add( core->timer, 1);
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(4, ("IRQ: The job on the core continue to run: %s\n", core->description )) ;
+ break;
+ }
+ }
+end_function:
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+}
+
+void subsystem_flush_mapped_mem_cache(void)
+{
+ _mali_osk_cache_flushall();
+ _mali_osk_mem_barrier();
+}
+
+#if USING_MALI_PMM
+
+_mali_osk_errcode_t mali_core_subsystem_signal_power_down(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool immediate_only)
+{
+ mali_core_renderunit * core = NULL;
+
+ MALI_CHECK_SUBSYSTEM(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ /* It is possible that this signal function can be called during a driver exit,
+ * and so the requested core may now be destroyed. (This is due to us not having
+ * the subsys lock before signalling power down).
+ * mali_core_renderunit_get_mali_core_nr() will report a Mali ERR because
+ * the core number is out of range (which is a valid error in other cases).
+ * So instead we check here (now that we have the subsys lock) and let the
+ * caller cope with the core get failure and check that the core has
+ * been unregistered in the PMM as part of its destruction.
+ */
+ if ( subsys->number_of_cores > mali_core_nr )
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys, mali_core_nr);
+ }
+
+ if ( NULL == core )
+ {
+ /* Couldn't find the core */
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: Failed to find core to power down\n") );
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ else if ( core->state != CORE_IDLE )
+ {
+ /* When powering down we either set a pending power down flag here, so we
+ * can power down cleanly after the job completes, or we don't set the
+ * flag if we have been asked to only power down right now.
+ * In either case, return that the core is busy.
+ */
+ if ( !immediate_only ) core->pend_power_down = MALI_TRUE;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: No idle core to power down\n") );
+ MALI_ERROR(_MALI_OSK_ERR_BUSY);
+ }
+
+ /* Shouldn't have a pending power down flag set */
+ MALI_DEBUG_ASSERT( !core->pend_power_down );
+
+ /* Move core to off queue */
+ mali_core_subsystem_move_core_set_off(core);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_core_subsystem_signal_power_up(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool queue_only)
+{
+ mali_core_renderunit * core;
+
+ MALI_CHECK_SUBSYSTEM(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ core = mali_core_renderunit_get_mali_core_nr(subsys, mali_core_nr);
+
+ if( core == NULL )
+ {
+ /* Couldn't find the core */
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: Failed to find core to power up\n") );
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ else if( core->state != CORE_OFF )
+ {
+ /* This will usually happen because we are trying to cancel a pending power down */
+ core->pend_power_down = MALI_FALSE;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: No powered off core to power up (cancelled power down?)\n") );
+ MALI_ERROR(_MALI_OSK_ERR_BUSY);
+ }
+
+ /* Shouldn't have a pending power down set */
+ MALI_DEBUG_ASSERT( !core->pend_power_down );
+
+ /* Move core to idle queue */
+ mali_core_subsystem_move_core_set_idle(core);
+
+ if( !queue_only )
+ {
+ /* Reset MMU & core - core must be idle to allow this */
+#if USING_MMU
+ if ( NULL!=core->mmu )
+ {
+#if defined(USING_MALI200)
+ if (core->pmm_id != MALI_PMM_CORE_PP0)
+ {
+#endif
+ mali_kernel_mmu_reset(core->mmu);
+#if defined(USING_MALI200)
+ }
+#endif
+
+ }
+#endif /* USING_MMU */
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_RUNABLE );
+ }
+
+ /* Need to schedule work to start on this core */
+ mali_core_subsystem_schedule(subsys);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ MALI_SUCCESS;
+}
+
+#endif /* USING_MALI_PMM */
+
+#if MALI_STATE_TRACKING
+void mali_core_renderunit_dump_state(mali_core_subsystem* subsystem)
+{
+ u32 i;
+ mali_core_renderunit *core;
+ mali_core_renderunit *tmp_core;
+
+ mali_core_session* session;
+ mali_core_session* tmp_session;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+ MALI_PRINT(("Subsystem;\n"));
+ MALI_PRINT((" Name: %s\n", subsystem->name));
+
+ for (i = 0; i < subsystem->number_of_cores; i++)
+ {
+ MALI_PRINT((" Core: #%u\n", subsystem->mali_core_array[i]->core_number));
+ MALI_PRINT((" Description: %s\n", subsystem->mali_core_array[i]->description));
+ switch(subsystem->mali_core_array[i]->state)
+ {
+ case CORE_IDLE:
+ MALI_PRINT((" State: CORE_IDLE\n"));
+ break;
+ case CORE_WORKING:
+ MALI_PRINT((" State: CORE_WORKING\n"));
+ break;
+ case CORE_WATCHDOG_TIMEOUT:
+ MALI_PRINT((" State: CORE_WATCHDOG_TIMEOUT\n"));
+ break;
+ case CORE_POLL:
+ MALI_PRINT((" State: CORE_POLL\n"));
+ break;
+ case CORE_HANG_CHECK_TIMEOUT:
+ MALI_PRINT((" State: CORE_HANG_CHECK_TIMEOUT\n"));
+ break;
+ case CORE_OFF:
+ MALI_PRINT((" State: CORE_OFF\n"));
+ break;
+ default:
+ MALI_PRINT((" State: Unknown (0x%X)\n", subsystem->mali_core_array[i]->state));
+ break;
+ }
+ MALI_PRINT((" Current job: 0x%x\n", (u32)(subsystem->mali_core_array[i]->current_job)));
+ MALI_PRINT((" Core version: 0x%x\n", subsystem->mali_core_array[i]->core_version));
+#if USING_MALI_PMM
+ MALI_PRINT((" PMM id: 0x%x\n", subsystem->mali_core_array[i]->pmm_id));
+ MALI_PRINT((" Power down requested: %s\n", subsystem->mali_core_array[i]->pend_power_down ? "TRUE" : "FALSE"));
+#endif
+ }
+
+ MALI_PRINT((" Cores on idle list:\n"));
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp_core, &subsystem->renderunit_idle_head, mali_core_renderunit, list)
+ {
+ MALI_PRINT((" Core #%u\n", core->core_number));
+ }
+
+ MALI_PRINT((" Cores on off list:\n"));
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp_core, &subsystem->renderunit_off_head, mali_core_renderunit, list)
+ {
+ MALI_PRINT((" Core #%u\n", core->core_number));
+ }
+
+ MALI_PRINT((" Connected sessions:\n"));
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp_session, &subsystem->all_sessions_head, mali_core_session, all_sessions_list)
+ {
+ MALI_PRINT((" Session 0x%X:\n", (u32)session));
+ MALI_PRINT((" Waiting job: 0x%X\n", (u32)session->job_waiting_to_run));
+ MALI_PRINT((" Notification queue: %s\n", _mali_osk_notification_queue_is_empty(session->notification_queue) ? "EMPTY" : "NON-EMPTY"));
+ }
+
+ MALI_PRINT((" Waiting sessions sum all priorities: %u\n", subsystem->awaiting_sessions_sum_all_priorities));
+ for (i = 0; i < PRIORITY_LEVELS; i++)
+ {
+ MALI_PRINT((" Waiting sessions with priority %u:\n", i));
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp_session, &subsystem->awaiting_sessions_head[i], mali_core_session, awaiting_sessions_list)
+ {
+ MALI_PRINT((" Session 0x%X:\n", (u32)session));
+ MALI_PRINT((" Waiting job: 0x%X\n", (u32)session->job_waiting_to_run));
+ MALI_PRINT((" Notification queue: %s\n", _mali_osk_notification_queue_is_empty(session->notification_queue) ? "EMPTY" : "NON-EMPTY"));
+ }
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h
new file mode 100644
index 00000000000..94bce94d4e1
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_rendercore.h
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_RENDERCORE_H__
+#define __MALI_RENDERCORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#define PRIORITY_LEVELS 3
+#define PRIORITY_MAX 0
+#define PRIORITY_MIN (PRIORITY_MAX+PRIORITY_LEVELS-1)
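+/* With PRIORITY_LEVELS == 3 this gives priorities 0 (highest) through 2 (lowest). */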
+
+/* This file contains what we need in the kernel for all core types. */
+
+typedef enum
+{
+ CORE_IDLE, /**< Core is ready for a new job */
+ CORE_WORKING, /**< Core is working on a job */
+ CORE_WATCHDOG_TIMEOUT, /**< Core is working but it has timed out */
+ CORE_POLL, /**< Poll timer triggered, pending handling */
+ CORE_HANG_CHECK_TIMEOUT,/**< Timeout for hang detection */
+ CORE_OFF /**< Core is powered off */
+} mali_core_status;
+
+typedef enum
+{
+ SUBSYSTEM_RESCHEDULE,
+ SUBSYSTEM_WAIT
+} mali_subsystem_reschedule_option;
+
+typedef enum
+{
+ MALI_CORE_RESET_STYLE_RUNABLE,
+ MALI_CORE_RESET_STYLE_DISABLE,
+ MALI_CORE_RESET_STYLE_HARD
+} mali_core_reset_style;
+
+typedef enum
+{
+ JOB_STATUS_CONTINUE_RUN = 0x01,
+ JOB_STATUS_END_SUCCESS = 1<<(16+0),
+ JOB_STATUS_END_OOM = 1<<(16+1),
+ JOB_STATUS_END_ABORT = 1<<(16+2),
+ JOB_STATUS_END_TIMEOUT_SW = 1<<(16+3),
+ JOB_STATUS_END_HANG = 1<<(16+4),
+ JOB_STATUS_END_SEG_FAULT = 1<<(16+5),
+ JOB_STATUS_END_ILLEGAL_JOB = 1<<(16+6),
+ JOB_STATUS_END_UNKNOWN_ERR = 1<<(16+7),
+ JOB_STATUS_END_SHUTDOWN = 1<<(16+8),
+ JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+} mali_subsystem_job_end_code;
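+
+/* Note: all END codes are placed in bits 16 and above, so a finished-job
+ * status can never be confused with JOB_STATUS_CONTINUE_RUN (0x01). */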
+
+
+struct mali_core_job;
+struct mali_core_subsystem;
+struct mali_core_renderunit;
+struct mali_core_session;
+
+/* We have one of these subsystems for each core type */
+typedef struct mali_core_subsystem
+{
+ struct mali_core_renderunit ** mali_core_array; /* An array of all cores of this type */
+ u32 number_of_cores; /* Number of cores in this list */
+
+ _mali_core_type core_type;
+
+ u32 magic_nr;
+
+ _mali_osk_list_t renderunit_idle_head; /* Idle cores of this type */
+ _mali_osk_list_t renderunit_off_head; /* Powered off cores of this type */
+
+ /* Linked list for each priority of sessions with a job ready for scheduling */
+ _mali_osk_list_t awaiting_sessions_head[PRIORITY_LEVELS];
+ u32 awaiting_sessions_sum_all_priorities;
+
+ /* Linked list of all sessions connected to this coretype */
+ _mali_osk_list_t all_sessions_head;
+
+ /* Notification queue for this core type */
+ struct _mali_osk_notification_queue_t * notification_queue;
+
+ const char * name;
+ mali_kernel_subsystem_identifier id;
+
+ /**** Functions registered for this core type. Set during mali_core_init ******/
+ /* Start this job on this core. Returns _MALI_OSK_ERR_OK if the job was started. */
+ _mali_osk_errcode_t (*start_job)(struct mali_core_job * job, struct mali_core_renderunit * core);
+
+ /* Check if the given core has an interrupt pending. Return MALI_TRUE and set the mask to 0 if pending */
+ u32 (*irq_handler_upper_half)(struct mali_core_renderunit * core);
+
+ /* This function should check if the interrupt indicates that the job was finished.
+ If so it should update the job struct, reset the core registers, and return MALI_TRUE.
+ If the job is still running after this function it should return MALI_FALSE.
+ The function must also enable the bits in the interrupt mask for the core.
+ Called by the bottom half interrupt function. */
+ int (*irq_handler_bottom_half)(struct mali_core_renderunit* core);
+
+ /* This function is called from the ioctl function and should return a mali_core_job pointer
+ to a created mali_core_job object with the data given from userspace */
+ _mali_osk_errcode_t (*get_new_job_from_user)(struct mali_core_session * session, void * argument);
+
+ _mali_osk_errcode_t (*suspend_response)(struct mali_core_session * session, void * argument);
+
+ /* This function is called from the ioctl function and should write the necessary data
+ to userspace, telling which job finished and the status and debug info for this job.
+ The function must also free and clean up the input job object. */
+ void (*return_job_to_user)(struct mali_core_job * job, mali_subsystem_job_end_code end_status);
+
+ /* Is called when a subsystem shuts down. This function needs to
+ release internal pointers in the core struct, and free the
+ core struct before returning.
+ It is not allowed to write to any registers, since the register
+ unmapping has already been done. */
+ void (*renderunit_delete)(struct mali_core_renderunit * core);
+
+ /* Is called when we want to abort a job that is running on the core.
+ This is done if a program exits while the core is running */
+ void (*reset_core)(struct mali_core_renderunit * core, mali_core_reset_style style);
+
+ /* Is called when the rendercore wants the core to give an interrupt */
+ void (*probe_core_irq_trigger)(struct mali_core_renderunit* core);
+
+ /* Is called when the irq probe wants the core to acknowledge an interrupt from the hw */
+ _mali_osk_errcode_t (*probe_core_irq_acknowledge)(struct mali_core_renderunit* core);
+
+ /* Called when the rendercore wants to issue a bus stop request to a core */
+ void (*stop_bus)(struct mali_core_renderunit* core);
+} mali_core_subsystem;
+
+
+/* Per core data. This must be embedded into each core type's internal core info. */
+typedef struct mali_core_renderunit
+{
+ struct mali_core_subsystem * subsystem; /* The core belongs to this subsystem */
+ _mali_osk_list_t list; /* Is always in subsystem->idle_list OR session->renderunits_working */
+ mali_core_status state;
+ mali_bool error_recovery; /* Indicates if the core is waiting for external help to recover (typically the MMU) */
+ mali_bool in_detach_function;
+ struct mali_core_job * current_job; /* Current job being processed on this core, or NULL */
+ u32 magic_nr;
+ _mali_osk_timer_t * timer;
+ _mali_osk_timer_t * timer_hang_detection;
+
+ mali_io_address registers_mapped; /* IO-mapped pointer to registers */
+ u32 registers_base_addr; /* Base address of the registers */
+ u32 size; /* The size of registers_mapped */
+ const char * description; /* Description of this core. */
+ u32 irq_nr; /* The IRQ nr for this core */
+ u32 core_version;
+#if USING_MMU
+ u32 mmu_id;
+ void * mmu; /* The MMU this rendercore is behind.*/
+#endif
+#if USING_MALI_PMM
+ mali_pmm_core_id pmm_id; /* The PMM core id */
+ mali_bool pend_power_down; /* Power down is requested */
+#endif
+
+ u32 core_number; /* 0 for first detected core of this type, 1 for second and so on */
+
+ _mali_osk_irq_t *irq;
+} mali_core_renderunit;
+
+
+/* Per open FILE data. */
+/* You must hold subsystem->mutex before any transactions on this datatype. */
+typedef struct mali_core_session
+{
+ struct mali_core_subsystem * subsystem; /* The session belongs to this subsystem */
+ _mali_osk_list_t renderunits_working_head; /* List of renderunits working for this session */
+ struct mali_core_job *job_waiting_to_run; /* The next job from this session to run */
+
+ _mali_osk_list_t awaiting_sessions_list; /* Linked list of sessions with jobs, for each priority */
+ _mali_osk_list_t all_sessions_list; /* Linked list of all sessions on the system. */
+
+ _mali_osk_notification_queue_t * notification_queue; /* Messages back to Base in userspace*/
+#if USING_MMU
+ struct mali_session_data * mmu_session; /* The session associated with the MMU page tables for this core */
+#endif
+ u32 magic_nr;
+} mali_core_session;
+
+
+/* This must be embedded into a specific mali_core_job struct */
+/* Use this macro to get the specific mali_core_job: container_of(ptr, type, member) */
+typedef struct mali_core_job
+{
+ _mali_osk_list_t list; /* Linked list of jobs. Used by struct mali_core_session */
+ struct mali_core_session *session;
+ u32 magic_nr;
+ u32 priority;
+ u32 watchdog_msecs;
+ u32 render_time_msecs;
+ u32 start_time_jiffies;
+ unsigned long watchdog_jiffies;
+ u32 abort_id;
+} mali_core_job;
+
+/*
+ * The rendercore subsystem is included in the subsystems[] array.
+ */
+extern struct mali_kernel_subsystem mali_subsystem_rendercore;
+
+void subsystem_flush_mapped_mem_cache(void);
+
+
+#define SUBSYSTEM_MAGIC_NR 0xdeadbeef
+#define CORE_MAGIC_NR 0xcafebabe
+#define SESSION_MAGIC_NR 0xbabe1234
+#define JOB_MAGIC_NR 0x0123abcd
+
+
+#define MALI_CHECK_SUBSYSTEM(subsystem)\
+ do { \
+ if ( SUBSYSTEM_MAGIC_NR != subsystem->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+ } while (0)
+
+#define MALI_CHECK_CORE(CORE)\
+ do { \
+ if ( CORE_MAGIC_NR != CORE->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+#define MALI_CHECK_SESSION(SESSION)\
+ do { \
+ if ( SESSION_MAGIC_NR != SESSION->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+#define MALI_CHECK_JOB(JOB)\
+ do { \
+ if ( JOB_MAGIC_NR != JOB->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+
+/* Check if job_a has higher priority than job_b */
+MALI_STATIC_INLINE int job_has_higher_priority(mali_core_job * job_a, mali_core_job * job_b)
+{
+ /* The lowest number has the highest priority */
+ return (int) (job_a->priority < job_b->priority);
+}
+
+MALI_STATIC_INLINE void job_priority_set(mali_core_job * job, u32 priority)
+{
+ if (priority > PRIORITY_MIN) job->priority = PRIORITY_MIN;
+ else job->priority = priority;
+}
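+
+/* Example (a sketch): with PRIORITY_LEVELS == 3, job_priority_set(job, 7)
+ * clamps the priority to PRIORITY_MIN (2), while job_priority_set(job, 0)
+ * requests the highest priority. */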
+
+void job_watchdog_set(mali_core_job * job, u32 watchdog_msecs);
+
+/* For use by const default register settings (e.g. set these after reset) */
+typedef struct register_address_and_value
+{
+ u32 address;
+ u32 value;
+} register_address_and_value;
+
+
+/* For use by dynamic default register settings (e.g. set these after reset) */
+typedef struct register_address_and_value_list
+{
+ _mali_osk_list_t list;
+ register_address_and_value item;
+} register_address_and_value_list;
+
+/* Used if the user wants to set a contiguous block of registers */
+typedef struct register_array_user
+{
+ u32 entries_in_array;
+ u32 start_address;
+ void __user * reg_array;
+} register_array_user;
+
+
+#define MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys) \
+ do { \
+ MALI_DEBUG_PRINT(5, ("MUTEX: GRAB %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ _mali_osk_lock_wait( rendercores_global_mutex, _MALI_OSK_LOCKMODE_RW); \
+ MALI_DEBUG_PRINT(5, ("MUTEX: GRABBED %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ rendercores_global_mutex_is_held = 1; \
+ rendercores_global_mutex_owner = _mali_osk_get_tid(); \
+ } while (0) ;
+
+#define MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys) \
+ do { \
+ MALI_DEBUG_PRINT(5, ("MUTEX: RELEASE %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ rendercores_global_mutex_is_held = 0; \
+ rendercores_global_mutex_owner = 0; \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ _mali_osk_lock_signal( rendercores_global_mutex, _MALI_OSK_LOCKMODE_RW); \
+ MALI_DEBUG_PRINT(5, ("MUTEX: RELEASED %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ } while (0) ;
+
+
+#define MALI_ASSERT_MUTEX_IS_GRABBED(input_pointer)\
+ do { \
+ if ( 0 == rendercores_global_mutex_is_held ) MALI_PRINT_ERROR(("ASSERT MUTEX SHOULD BE GRABBED"));\
+ if ( SUBSYSTEM_MAGIC_NR != input_pointer->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ if ( rendercores_global_mutex_owner != _mali_osk_get_tid() ) MALI_PRINT_ERROR(("Owner mismatch"));\
+ } while (0)
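+
+/* Typical usage of these macros, mirroring the subsystem code (a sketch,
+ * not a new API):
+ *
+ *   MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ *   MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ *   ... modify subsystem/core/session state ...
+ *   MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ */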
+
+
+u32 mali_core_renderunit_register_read(struct mali_core_renderunit *core, u32 relative_address);
+void mali_core_renderunit_register_read_array(struct mali_core_renderunit *core, u32 relative_address, u32 * result_array, u32 nr_of_regs);
+void mali_core_renderunit_register_write(struct mali_core_renderunit *core, u32 relative_address, u32 new_val);
+void mali_core_renderunit_register_write_array(struct mali_core_renderunit *core, u32 relative_address, u32 * write_array, u32 nr_of_regs);
+
+_mali_osk_errcode_t mali_core_renderunit_init(struct mali_core_renderunit * core);
+void mali_core_renderunit_term(struct mali_core_renderunit * core);
+int mali_core_renderunit_map_registers(struct mali_core_renderunit *core);
+void mali_core_renderunit_unmap_registers(struct mali_core_renderunit *core);
+int mali_core_renderunit_irq_handler_add(struct mali_core_renderunit *core);
+mali_core_renderunit * mali_core_renderunit_get_mali_core_nr(mali_core_subsystem *subsys, u32 mali_core_nr);
+
+int mali_core_subsystem_init(struct mali_core_subsystem * new_subsys);
+#if USING_MMU
+void mali_core_subsystem_attach_mmu(mali_core_subsystem* subsys);
+#endif
+int mali_core_subsystem_register_renderunit(struct mali_core_subsystem * subsys, struct mali_core_renderunit * core);
+int mali_core_subsystem_system_info_fill(mali_core_subsystem* subsys, _mali_system_info* info);
+void mali_core_subsystem_cleanup(struct mali_core_subsystem * subsys);
+#if USING_MMU
+void mali_core_subsystem_broadcast_notification(struct mali_core_subsystem * subsys, mali_core_notification_message message, u32 data);
+#endif
+void mali_core_session_begin(mali_core_session *session);
+void mali_core_session_close(mali_core_session * session);
+int mali_core_session_add_job(mali_core_session * session, mali_core_job *job, mali_core_job **job_return);
+u32 mali_core_hang_check_timeout_get(void);
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_start_job(mali_core_session * session, void *job_data);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_number_of_cores_get(mali_core_session * session, u32 *number_of_cores);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_core_version_get(mali_core_session * session, _mali_core_version *version);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_suspend_response(mali_core_session * session, void* argument);
+void mali_core_subsystem_ioctl_abort_job(mali_core_session * session, u32 id);
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t mali_core_subsystem_signal_power_down(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool immediate_only);
+_mali_osk_errcode_t mali_core_subsystem_signal_power_up(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool queue_only);
+#endif
+
+#if MALI_STATE_TRACKING
+void mali_core_renderunit_dump_state(mali_core_subsystem* subsystem);
+#endif
+
+#endif /* __MALI_RENDERCORE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h
new file mode 100644
index 00000000000..ce290d0c1c9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_session_manager.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SESSION_MANAGER_H__
+#define __MALI_KERNEL_SESSION_MANAGER_H__
+
+/* Incomplete struct to pass around pointers to it */
+struct mali_session_data;
+
+void * mali_kernel_session_manager_slot_get(struct mali_session_data * session, int id);
+
+#endif /* __MALI_KERNEL_SESSION_MANAGER_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h
new file mode 100644
index 00000000000..e8c6530668c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_subsystem.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_subsystem.h
+ */
+
+#ifndef __MALI_KERNEL_SUBSYSTEM_H__
+#define __MALI_KERNEL_SUBSYSTEM_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+
+/* typedefs of the datatypes used in the hook functions */
+typedef void * mali_kernel_subsystem_session_slot;
+typedef int mali_kernel_subsystem_identifier;
+typedef _mali_osk_errcode_t (*mali_kernel_resource_registrator)(_mali_osk_resource_t *);
+
+/**
+ * Broadcast notification messages
+ */
+typedef enum mali_core_notification_message
+{
+ MMU_KILL_STEP0_LOCK_SUBSYSTEM, /**< Request to lock subsystem */
+ MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES, /**< Request to stop all buses */
+ MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS, /**< Request kill all jobs, and not start more jobs */
+ MMU_KILL_STEP3_CONTINUE_JOB_HANDLING, /**< Request to continue with new jobs on all cores */
+ MMU_KILL_STEP4_UNLOCK_SUBSYSTEM /**< Request to unlock subsystem */
+} mali_core_notification_message;
+
+/**
+ * A function pointer can be NULL if the subsystem isn't interested in the event.
+ */
+typedef struct mali_kernel_subsystem
+{
+ /* subsystem control */
+ _mali_osk_errcode_t (*startup)(mali_kernel_subsystem_identifier id); /**< Called during module load or system startup*/
+ void (*shutdown)(mali_kernel_subsystem_identifier id); /**< Called during module unload or system shutdown */
+
+ /**
+ * Called during module load or system startup.
+ * Called when all subsystems have reported startup OK and all resources were successfully initialized
+ */
+ _mali_osk_errcode_t (*load_complete)(mali_kernel_subsystem_identifier id);
+
+ /* per subsystem handlers */
+ _mali_osk_errcode_t (*system_info_fill)(_mali_system_info* info); /**< Fill info into info struct. MUST allocate memory with kmalloc, since it's kfree'd */
+
+ /* per session handlers */
+ /**
+ * Informs about a new session.
+ * slot can be used to track per-session per-subsystem data.
+ * queue can be used to send events to user space.
+ * _mali_osk_errcode_t error return value.
+ */
+ _mali_osk_errcode_t (*session_begin)(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+ /**
+ * Informs that a session is ending
+ * slot was the same as given during session_begin
+ */
+ void (*session_end)(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+ /* Used by subsystems to send messages to each other. This is the receiving end */
+ void (*broadcast_notification)(mali_core_notification_message message, u32 data);
+
+#if MALI_STATE_TRACKING
+ /** Dump the current state of the subsystem */
+ void (*dump_state)(void);
+#endif
+} mali_kernel_subsystem;
+
+/* functions used by the subsystems to interact with the core */
+/**
+ * Register a resource handler
+ * @param type The resource type to register a handler for
+ * @param handler Pointer to the function handling this resource
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t _mali_kernel_core_register_resource_handler(_mali_osk_resource_type_t type, mali_kernel_resource_registrator handler);
+
+/* function used to interact with other subsystems */
+/**
+ * Broadcast a message
+ * Sends a message to all subsystems which have registered a broadcast notification handler
+ * @param message The message to send
+ * @param data Message specific extra data
+ */
+void _mali_kernel_core_broadcast_subsystem_message(mali_core_notification_message message, u32 data);
+
+#if MALI_STATE_TRACKING
+/**
+ * Tell all subsystems to dump their current state
+ */
+void _mali_kernel_core_dump_state(void);
+#endif
+
+
+#endif /* __MALI_KERNEL_SUBSYSTEM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c
new file mode 100644
index 00000000000..04653b06ad5
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+/* Define how often to calculate and report GPU utilization, in milliseconds */
+#define MALI_GPU_UTILIZATION_TIMEOUT 500
+
+static _mali_osk_lock_t *time_data_lock;
+
+static _mali_osk_atomic_t num_running_cores;
+
+static u64 period_start_time = 0;
+static u64 work_start_time = 0;
+static u64 accumulated_work_time = 0;
+
+static _mali_osk_timer_t *utilization_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+
+static void calculate_gpu_utilization(void* arg)
+{
+ u64 time_now;
+ u64 time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 work_normalized;
+ u32 period_normalized;
+ u32 utilization;
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (accumulated_work_time == 0 && work_start_time == 0)
+ {
+ /* Don't reschedule timer, this will be started if new work arrives */
+ timer_running = MALI_FALSE;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* No work done for this period, report zero usage */
+ mali_gpu_utilization_handler(0);
+
+ return;
+ }
+
+ time_now = _mali_osk_time_get_ns();
+ time_period = time_now - period_start_time;
+
+ /* If we are currently busy, update working period up to now */
+ if (work_start_time != 0)
+ {
+ accumulated_work_time += (time_now - work_start_time);
+ work_start_time = time_now;
+ }
+
+ /*
+ * We have two 64-bit values, a dividend and a divisor.
+ * To avoid depending on a 64-bit divider, we first shift both values
+ * down equally.
+ * We then shift the dividend up, or the divisor down, so that the result
+ * is expressed in parts of 256.
+ */
+
+ /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+ leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+ shift_val = 32 - leading_zeroes;
+ work_normalized = (u32)(accumulated_work_time >> shift_val);
+ period_normalized = (u32)(time_period >> shift_val);
+
+ /*
+ * Now we should report the usage in parts of 256;
+ * this means we must shift the dividend up, or the divisor down, by 8
+ * (we could do a combination, but we use just one for simplicity;
+ * the end result should be good enough anyway)
+ */
+ if (period_normalized > 0x00FFFFFF)
+ {
+ /* The divisor is so big that it is safe to shift it down */
+ period_normalized >>= 8;
+ }
+ else
+ {
+ /*
+ * The divisor is so small that we can shift the dividend up without losing any data
+ * (the dividend is always smaller than the divisor).
+ */
+ work_normalized <<= 8;
+ }
+
+ utilization = work_normalized / period_normalized;
+
+ accumulated_work_time = 0;
+ period_start_time = time_now; /* starting a new period */
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+
+ mali_gpu_utilization_handler(utilization);
+}
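+
+/* Worked example (an illustration, assuming a 500 ms period where the GPU
+ * was busy half the time): time_period = 500000000 ns fits in 32 bits, so
+ * shift_val == 0. period_normalized = 500000000 exceeds 0x00FFFFFF and is
+ * shifted down by 8 to 1953125. With accumulated_work_time = 250000000,
+ * utilization = 250000000 / 1953125 = 128, i.e. 128/256 = 50%. */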
+
+
+
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+ time_data_lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ|_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0 );
+ if (NULL == time_data_lock)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ _mali_osk_atomic_init(&num_running_cores, 0);
+
+ utilization_timer = _mali_osk_timer_init();
+ if (NULL == utilization_timer)
+ {
+ _mali_osk_lock_term(time_data_lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_utilization_suspend(void)
+{
+ if (NULL != utilization_timer)
+ {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ }
+}
+
+void mali_utilization_term(void)
+{
+ if (NULL != utilization_timer)
+ {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ _mali_osk_timer_term(utilization_timer);
+ utilization_timer = NULL;
+ }
+
+ _mali_osk_atomic_term(&num_running_cores);
+
+ _mali_osk_lock_term(time_data_lock);
+}
+
+
+
+void mali_utilization_core_start(void)
+{
+ if (_mali_osk_atomic_inc_return(&num_running_cores) == 1)
+ {
+ /*
+ * We went from zero cores working, to one core working,
+ * we now consider the entire GPU for being busy
+ */
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ work_start_time = _mali_osk_time_get_ns();
+
+ if (timer_running != MALI_TRUE)
+ {
+ timer_running = MALI_TRUE;
+ period_start_time = work_start_time; /* starting a new period */
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+ }
+ else
+ {
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+ }
+}
+
+
+
+void mali_utilization_core_end(void)
+{
+ if (_mali_osk_atomic_dec_return(&num_running_cores) == 0)
+ {
+ /*
+ * No more cores are working, so accumulate the time we were busy.
+ */
+ u64 time_now;
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ time_now = _mali_osk_time_get_ns();
+ accumulated_work_time += (time_now - work_start_time);
+ work_start_time = 0;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h
new file mode 100644
index 00000000000..f6485006729
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_utilization.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include "mali_osk.h"
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Should be called when a core is about to execute a job
+ */
+void mali_utilization_core_start(void);
+
+/**
+ * Should be called to stop the utilization timer during system suspend
+ */
+void mali_utilization_suspend(void);
+
+/**
+ * Should be called when a core has completed executing a job
+ */
+void mali_utilization_core_end(void);
+
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c
new file mode 100644
index 00000000000..8dfa3a393ba
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_kernel_vsync.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+/*#include "mali_timestamp.h"
+*/
+
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+ _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+ MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+/* u64 ts = _mali_timestamp_get();
+ */
+
+ MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+
+ MALI_SUCCESS;
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h
new file mode 100644
index 00000000000..c35d90577a3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk.h
@@ -0,0 +1,1620 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+ typedef unsigned char u8;
+ typedef signed char s8;
+ typedef unsigned short u16;
+ typedef signed short s16;
+ typedef unsigned int u32;
+ typedef signed int s32;
+ typedef unsigned long long u64;
+ #define BITS_PER_LONG (sizeof(long)*8)
+#else
+ /* Ensure Linux types u32, etc. are defined */
+ #include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+ typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+ #define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+ #define MALI_FALSE ((mali_bool)0)
+#endif
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error code. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to the U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum
+{
+ _MALI_OSK_ERR_OK = 0, /**< Success. */
+ _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+ _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+ _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+ _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+ _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+ _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */
+ _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+ _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+ _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)( void * arg );
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)( void * arg );
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context.
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * acheived in an IRQ context, then it may defer the work with
+ * _mali_osk_irq_schedulework(). Refer to \ref _mali_osk_irq_schedulework() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)( void * arg );
+
+/** @brief IRQ 'bottom-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the deferred handling
+ * of a resource's IRQ. Usually, this work cannot be carried out in IRQ context
+ * by the IRQ upper-half handler.
+ *
+ * The IRQ bottom-half handler maps on to the concept of an IST that may
+ * execute some time after the actual IRQ has fired.
+ *
+ * All OSK-registered IRQ bottom-half handlers will be serialized, across all
+ * CPU-cores in the system.
+ *
+ * Refer to \ref _mali_osk_irq_schedulework() for more information on the
+ * IRQ work-queue, and the calling of the IRQ bottom-half handler.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_irq_bhandler_t)( void * arg );
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Even though the structure has space for a u32, the counters will only
+ * represent signed 24-bit integers.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct
+{
+ union
+ {
+ u32 val;
+ void *obj;
+ } u;
+} _mali_osk_atomic_t;
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * Flags are supplied at the point where the Lock is initialized. Each flag can
+ * be combined with others using bitwise OR, '|'.
+ *
+ * The flags must be sufficiently rich to cope with all our OSs. This means
+ * that on some OSs, certain flags can be completely ignored. We define a
+ * number of terms that are significant across all OSs:
+ *
+ * - Sleeping/non-sleeping mutexes. Sleeping mutexes can block on waiting, and so
+ * schedule out the current thread. This is significant on OSs where there are
+ * situations in which the current thread must not be put to sleep. On OSs
+ * without this restriction, sleeping and non-sleeping mutexes can be treated
+ * as the same (if that is required).
+ * - Interruptable/non-interruptable mutexes. For sleeping mutexes, it may be
+ * possible for the sleep to be interrupted for a reason other than the thread
+ * being able to obtain the lock. OSs behaving in this way may provide a
+ * mechanism to control whether sleeping mutexes can be interrupted. On OSs
+ * that do not support the concept of interruption, \b or they do not support
+ * control of mutex interruption, then interruptable mutexes may be treated
+ * as non-interruptable.
+ *
+ * Some constraints apply to the lock type flags:
+ *
+ * - Spinlocks are, by nature, non-interruptable. Hence, they must always be
+ * combined with the NONINTERRUPTABLE flag, because it is meaningless to ask
+ * for a spinlock that is interruptable (and this highlights its
+ * non-interruptable-ness). For example, on certain OSs they should be used when
+ * you must not sleep.
+ * - Reader/writer is an optimization hint, and any type of lock can be
+ * reader/writer. Since this is an optimization hint, the implementation need
+ * not respect this for any/all types of lock. For example, on certain OSs,
+ * there's no interruptable reader/writer mutex. If such a thing were requested
+ * on that OS, the fact that interruptable was requested takes priority over the
+ * reader/writer-ness, because reader/writer-ness is not necessary for correct
+ * operation.
+ * - Any lock can use the order parameter.
+ * - A onelock is an optimization hint specific to certain OSs. It can be
+ * specified when it is known that only one lock will be held by the thread,
+ * and so can provide faster mutual exclusion. This can be safely ignored if
+ * such optimization is not required/present.
+ *
+ * The absence of any flags (the value 0) results in a sleeping-mutex, which is interruptable.
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKFLAG_SPINLOCK = 0x1, /**< Specifically, don't sleep on those architectures that require it */
+ _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE = 0x2, /**< The mutex cannot be interrupted, e.g. delivery of signals on those architectures where this is required */
+ _MALI_OSK_LOCKFLAG_READERWRITER = 0x4, /**< Optimise for readers/writers */
+ _MALI_OSK_LOCKFLAG_ORDERED = 0x8, /**< Use the order parameter; otherwise use automatic ordering */
+ _MALI_OSK_LOCKFLAG_ONELOCK = 0x10, /**< Each thread can only hold one lock at a time */
+ _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ = 0x20, /**< IRQ version of spinlock */
+ /** @enum _mali_osk_lock_flags_t
+ *
+ * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks specified
+ * as _MALI_OSK_LOCKFLAG_READERWRITER. In this case, the RO mode can be used
+ * to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other than RW mode when
+ * _MALI_OSK_LOCKFLAG_READERWRITER is not supplied.
+ *
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+ _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+ /** @enum _mali_osk_lock_mode_t
+ *
+ * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private type for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_t_struct _mali_osk_lock_t;
+/** @} */ /* end group _mali_osk_lock */
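+
+/* A minimal usage sketch (mirroring the calls used in
+ * mali_kernel_utilization.c above):
+ *
+ *   _mali_osk_lock_t *lock = _mali_osk_lock_init(
+ *       _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0);
+ *   _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ *   ... critical section ...
+ *   _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ *   _mali_osk_lock_term(lock);
+ */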
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address * mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes. */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
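+
+/* A short illustration of how these macros combine. The address and size
+ * values are hypothetical, chosen only to make the arithmetic concrete:
+ *
+ *   u32 phys = 0x80001234;
+ *   u32 size = 0x2100;
+ *   u32 page_start  = phys & _MALI_OSK_CPU_PAGE_MASK;     gives 0x80001000
+ *   u32 page_offset = phys & ~_MALI_OSK_CPU_PAGE_MASK;    gives 0x234
+ *   u32 num_pages   = (size + _MALI_OSK_CPU_PAGE_SIZE - 1) >> _MALI_OSK_CPU_PAGE_ORDER;
+ *                                                          gives 3
+ */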
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+/** Mali Page Size, in bytes. */
+#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range.
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum
+{
+ _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct
+{
+ u32 notification_type; /**< The notification type */
+ u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+ void * result_buffer; /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_irq_schedulework() to make use of the IRQ bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void * arg );
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ * _mali_osk_list_t member.
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s
+{
+ struct _mali_osk_list_s *next;
+ struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * when ptr is of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+ ((type *)( ((char *)ptr) - offsetof(type,member) ))
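+
+/* An illustrative sketch of the recovery this macro performs; my_session_t
+ * and its members are hypothetical names, not part of the driver:
+ *
+ *   typedef struct my_session
+ *   {
+ *       u32 id;
+ *       _mali_osk_list_t link;
+ *   } my_session_t;
+ *
+ *   my_session_t session;
+ *   _mali_osk_list_t *p = &session.link;
+ *   my_session_t *s = _MALI_OSK_CONTAINER_OF( p, my_session_t, link );
+ *   s now equals &session.
+ */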
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+ _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
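+
+/* A sketch of safe enumeration with deletion, reusing the hypothetical
+ * my_session_t wrapper from above. all_sessions is an assumed list head, and
+ * _mali_osk_list_del() is assumed to be the list-removal helper provided
+ * alongside these macros:
+ *
+ *   my_session_t *ptr, *tmp;
+ *   _MALI_OSK_LIST_FOREACHENTRY( ptr, tmp, &all_sessions, my_session_t, link )
+ *   {
+ *       if ( 0 == ptr->id )
+ *       {
+ *           _mali_osk_list_del( &ptr->link );
+ *           _mali_osk_free( ptr );
+ *       }
+ *   }
+ */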
+/** @} */ /* end group _mali_osk_list */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief The known resource types
+ *
+ * @note \b IMPORTANT: these must remain fixed, and only be extended. This is
+ * because not all systems use a header file for reading in their resources.
+ * The resources may instead come from a data file where these resources are
+ * 'hard-coded' in, because there's no easy way of transferring the enum values
+ * into such data files. E.g. the C pre-processor does \em not process enums.
+ */
+typedef enum _mali_osk_resource_type
+{
+ RESOURCE_TYPE_FIRST =0, /**< Duplicate resource marker for the first resource*/
+ MEMORY =0, /**< Physically contiguous memory block, not managed by the OS */
+ OS_MEMORY =1, /**< Memory managed by and shared with the OS */
+ MALI200 =3, /**< Mali200 Programmable Fragment Shader */
+ MALIGP2 =4, /**< MaliGP2 Programmable Vertex Shader */
+ MMU =5, /**< Mali MMU (Memory Management Unit) */
+ FPGA_FRAMEWORK =6, /**< Mali registers specific to FPGA implementations */
+ MALI400L2 =7, /**< Mali400 L2 Cache */
+ MALI300L2 =7, /**< Mali300 L2 Cache */
+ MALI400GP =8, /**< Mali400 Programmable Vertex Shader Core */
+ MALI300GP =8, /**< Mali300 Programmable Vertex Shader Core */
+ MALI400PP =9, /**< Mali400 Programmable Fragment Shader Core */
+ MALI300PP =9, /**< Mali300 Programmable Fragment Shader Core */
+ MEM_VALIDATION =10, /**< External Memory Validator */
+ PMU =11, /**< Power Management Unit */
+ RESOURCE_TYPE_COUNT /**< The total number of known resources */
+} _mali_osk_resource_type_t;
+
+/** @brief resource description struct
+ *
+ * _mali_osk_resources_init() will enumerate objects of this type. Not all
+ * members have a valid meaning across all types.
+ *
+ * The mmu_id is used to group resources to a certain MMU, since there may be
+ * more than one MMU in the system, and each resource may be using a different
+ * MMU:
+ * - For MMU resources, the setting of mmu_id is a uniquely identifying number.
+ * - For Other resources, the setting of mmu_id determines which MMU the
+ * resource uses.
+ */
+typedef struct _mali_osk_resource
+{
+ _mali_osk_resource_type_t type; /**< type of the resource */
+ const char * description; /**< short description of the resource */
+ u32 base; /**< Physical base address of the resource, as seen by Mali resources. */
+ s32 cpu_usage_adjust; /**< Offset added to the base address of the resource to arrive at the CPU physical address of the resource (if different from the Mali physical address) */
+ u32 size; /**< Size in bytes of the resource - either the size of its register range, or the size of the memory block. */
+ u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+ u32 flags; /**< Resources-specific flags. */
+ u32 mmu_id; /**< Identifier for Mali MMU resources. */
+ u32 alloc_order; /**< Order in which MEMORY/OS_MEMORY resources are used */
+} _mali_osk_resource_t;
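+
+/* An illustrative fragment of a resource list, in the style of the
+ * per-platform arch config headers. The addresses, IRQ numbers and
+ * descriptions are placeholders only:
+ *
+ *   static _mali_osk_resource_t arch_configuration [] =
+ *   {
+ *       { .type = MALI400GP, .description = "Mali-400 GP",
+ *         .base = 0xA0000000, .irq = 18, .mmu_id = 1 },
+ *       { .type = MMU, .description = "Mali-400 MMU",
+ *         .base = 0xA0003000, .irq = 20, .mmu_id = 1 },
+ *   };
+ */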
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+#include "mali_kernel_memory_engine.h" /* include for mali_memory_allocation and mali_physical_memory_allocation type */
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Fake IRQ number for testing purposes
+ */
+#define _MALI_OSK_IRQ_NUMBER_FAKE ((u32)0xFFFFFFF1)
+
+/** @brief PMM Virtual IRQ number
+ */
+#define _MALI_OSK_IRQ_NUMBER_PMM ((u32)0xFFFFFFF2)
+
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * The _mali_osk_irq_t returned must be written into the resource-specific data
+ * pointed to by data. This is so that the upper and lower handlers can call
+ * _mali_osk_irq_schedulework().
+ *
+ * @note The caller must ensure that the resource does not generate an
+ * interrupt after _mali_osk_irq_init() finishes, and before the
+ * _mali_osk_irq_t is written into the resource-specific data. Otherwise,
+ * the upper-half handler will fail to call _mali_osk_irq_schedulework().
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and trigger_func and ack_func must be
+ * non-NULL.
+ * @param uhandler The upper-half handler, corresponding to an ISR handler for
+ * the resource
+ * @param bhandler The lower-half handler, corresponding to an IST handler for
+ * the resource
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param data resource-specific data, which will be passed to uhandler,
+ * bhandler and (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, _mali_osk_irq_bhandler_t bhandler, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *data, const char *description );
+
+/** @brief Cause a queued, deferred call of the IRQ bottom-half.
+ *
+ * _mali_osk_irq_schedulework provides a mechanism for enqueuing deferred calls
+ * to the IRQ bottom-half handler. The queue is known as the IRQ work-queue.
+ * After calling _mali_osk_irq_schedulework(), the IRQ bottom-half handler will
+ * be scheduled to run at some point in the future.
+ *
+ * This is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_irq_schedulework() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_irq_schedulework() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the IRQ bottom-half handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_irq_schedulework()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The IRQ bottom-half callback (of type _mali_osk_irq_bhandler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_irq_schedulework() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_irq_schedulework() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * IRQs will be lost.
+ *
+ * The queue that _mali_osk_irq_schedulework implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_irq_schedulework
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered IRQs.
+ *
+ * The consequence of the queue being a 1-reader type is that calling
+ * _mali_osk_irq_schedulework() on different _mali_osk_irq_t objects causes
+ * their IRQ bottom-halves to be serialized, across all CPU-cores in the
+ * system.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ bottom-half must begin processing.
+ */
+void _mali_osk_irq_schedulework( _mali_osk_irq_t *irq );
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for the
+ * IRQ work-queue to finish the work that is currently in the queue. That is,
+ * for every deferred call currently in the IRQ work-queue, it waits for each
+ * of those to be processed by their respective IRQ bottom-half handler.
+ *
+ * This function is used to ensure that the bottom-half handler of the supplied
+ * IRQ object will not be running at the completion of this function call.
+ * However, the caller must ensure that no other sources could call the
+ * _mali_osk_irq_schedulework() on the same IRQ object. For example, the
+ * relevant timers must be stopped.
+ *
+ * @note While this function is being called, other OSK-registered IRQs in the
+ * system may enqueue work for their respective bottom-half handlers. This
+ * function will not wait for those entries in the work-queue to be flushed.
+ *
+ * Since this blocks on the completion of work in the IRQ work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any OSK-registered IRQ bottom-half handler. To do so may cause a deadlock.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq );
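+
+/* A lifecycle sketch for the three calls above; my_core_t, my_uhandler and
+ * my_bhandler are hypothetical resource-specific names, and the error
+ * handling is abbreviated:
+ *
+ *   core->irq = _mali_osk_irq_init( resource->irq, my_uhandler, my_bhandler,
+ *                                   NULL, NULL, (void *)core, "my core" );
+ *   if ( NULL == core->irq ) return _MALI_OSK_ERR_FAULT;
+ *
+ *   my_uhandler() then defers further processing with:
+ *       _mali_osk_irq_schedulework( core->irq );
+ *
+ *   and at driver termination, after stopping all other work sources:
+ *       _mali_osk_irq_term( core->irq );
+ */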
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom );
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after increment */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom );
+
+/** @brief Initialize an atomic counter
+ *
+ * The counters have storage for signed 24-bit integers. Initializing to signed
+ * values requiring more than 24 bits of storage will fail.
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val );
+
+/** @brief Read a value from an atomic counter
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom );
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom );
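+
+/* A minimal usage sketch of the counter API above; jobs_in_flight is a
+ * hypothetical counter name:
+ *
+ *   _mali_osk_atomic_t jobs_in_flight;
+ *   if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init( &jobs_in_flight, 0 ) )
+ *       return _MALI_OSK_ERR_FAULT;
+ *
+ *   _mali_osk_atomic_inc( &jobs_in_flight );
+ *   if ( 0 == _mali_osk_atomic_dec_return( &jobs_in_flight ) )
+ *   {
+ *       the last job has completed
+ *   }
+ *
+ *   _mali_osk_atomic_term( &jobs_in_flight );
+ */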
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc( u32 n, u32 size );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free( void *ptr );
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy( void *dst, const void *src, u32 len );
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset( void *s, u32 c, u32 n );
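+
+/* A minimal allocation sketch tying these functions together; my_record_t is
+ * a hypothetical type:
+ *
+ *   my_record_t *rec = _mali_osk_malloc( sizeof(*rec) );
+ *   if ( NULL == rec ) return _MALI_OSK_ERR_NOMEM;
+ *   _mali_osk_memset( rec, 0, sizeof(*rec) );
+ *   (use rec; _mali_osk_calloc( 1, sizeof(*rec) ) would combine both steps)
+ *   _mali_osk_free( rec );
+ */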
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OSs bring up an interactive out-of-memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * avoid triggering the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated );
+
+/** @addtogroup _mali_osk_lock
+ * @{ */
+
+/** @brief Initialize a Mutual Exclusion Lock
+ *
+ * Locks are created in the signalled (unlocked) state.
+ *
+ * initial must be zero, since there is currently no means of expressing
+ * whether a reader/writer lock should be initially locked as a reader or
+ * writer. This would require some encoding to be used.
+ *
+ * 'Automatic' ordering means that locks must be obtained in the order that
+ * they were created. For all locks that can be held at the same time, they must
+ * either all provide the order parameter, or they all must use 'automatic'
+ * ordering - because there is no way of mixing 'automatic' and 'manual'
+ * ordering.
+ *
+ * @param flags flags combined with bitwise OR ('|'), or zero. There are
+ * restrictions on which flags can be combined, @see _mali_osk_lock_flags_t.
+ * @param initial For future expansion into semaphores. SBZ.
+ * @param order The locking order of the mutex. That is, locks obtained by the
+ * same thread must have been created with an increasing order parameter, for
+ * deadlock prevention. Setting to zero causes 'automatic' ordering to be used.
+ * @return On success, a pointer to a _mali_osk_lock_t object. NULL on failure.
+ */
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order );
+
+/** @brief Wait for a lock to be signalled (obtained)
+
+ * After a thread has successfully waited on the lock, the lock is obtained by
+ * the thread, and is marked as unsignalled. The thread releases the lock by
+ * signalling it.
+ *
+ * In the case of Reader/Writer locks, multiple readers can obtain a lock in
+ * the absence of writers, which is a performance optimization (providing that
+ * the readers never write to the protected resource).
+ *
+ * To prevent deadlock, locks must always be obtained in the same order.
+ *
+ * For locks marked as _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, it is a
+ * programming error for the function to exit without obtaining the lock. This
+ * means that the error code must only be checked for interruptible locks.
+ *
+ * @param lock the lock to wait upon (obtain).
+ * @param mode the mode in which the lock should be obtained. Unless the lock
+ * was created with _MALI_OSK_LOCKFLAG_READERWRITER, this must be
+ * _MALI_OSK_LOCKMODE_RW.
+ * @return On success, _MALI_OSK_ERR_OK. For interruptible locks, a suitable
+ * _mali_osk_errcode_t will be returned on failure, and the lock will not be
+ * obtained. In this case, the error code must be propagated up to the U/K
+ * interface.
+ */
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode);
+
+
+/** @brief Signal (release) a lock
+ *
+ * Locks may only be signalled by the thread that originally waited upon the
+ * lock.
+ *
+ * @note In the OSU, a flag exists to allow any thread to signal a
+ * lock. Such functionality is not present in the OSK.
+ *
+ * @param lock the lock to signal (release).
+ * @param mode the mode in which the lock should be obtained. This must match
+ * the mode in which the lock was waited upon.
+ */
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode );
+
+/** @brief Terminate a lock
+ *
+ * This terminates a lock and frees all associated resources.
+ *
+ * It is a programming error to terminate the lock when it is held (unsignalled)
+ * by a thread.
+ *
+ * @param lock the lock to terminate.
+ */
+void _mali_osk_lock_term( _mali_osk_lock_t *lock );
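+
+/* A minimal lifecycle sketch for a non-interruptible mutex with 'automatic'
+ * ordering; the flag/initial/order values shown are just one valid choice:
+ *
+ *   _mali_osk_lock_t *lock;
+ *   lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0 );
+ *   if ( NULL == lock ) return _MALI_OSK_ERR_NOMEM;
+ *
+ *   _mali_osk_lock_wait( lock, _MALI_OSK_LOCKMODE_RW );
+ *   (critical section; the return value need not be checked for
+ *    non-interruptible locks)
+ *   _mali_osk_lock_signal( lock, _MALI_OSK_LOCKMODE_RW );
+ *
+ *   _mali_osk_lock_term( lock );
+ */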
+/** @} */ /* end group _mali_osk_lock */
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which affects memory
+ * mapped by _mali_osk_mem_mapregion. It will not be needed for memory
+ * mapped through _mali_osk_mem_mapioregion.
+ */
+void _mali_osk_mem_barrier( void );
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description );
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size );
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description );
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion( u32 phys, u32 size );
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32( volatile mali_io_address mapping, u32 offset );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32( volatile mali_io_address mapping, u32 offset, u32 val );
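+
+/* A read-modify-write sketch on a mapped register range; the 0x10 offset and
+ * the enable bit are placeholders, and 'mapping' is assumed to have been
+ * returned by _mali_osk_mem_mapioregion():
+ *
+ *   u32 ctrl = _mali_osk_mem_ioread32( mapping, 0x10 );
+ *   ctrl |= 0x1;
+ *   _mali_osk_mem_iowrite32( mapping, 0x10, ctrl );
+ */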
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall( void );
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OSs do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size );
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications that
+ * the different subsystems want to send to user space have to be queued for
+ * later reception, or a U/K API call has to block until an event has occurred.
+ *
+ * Typical uses of notifications are after jobs have run on the hardware, or
+ * when changes to the system are detected that need to be relayed to user
+ * space.
+ *
+ * After an event has occurred, user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size );
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object );
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue, which can be used to queue messages for
+ * delivery to user space, and from which queued messages can be retrieved.
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void );
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue );
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object );
+
+#if MALI_STATE_TRACKING
+/** @brief Check whether a notification queue is empty
+ *
+ * Check if a notification queue is empty.
+ *
+ * @param queue The queue to check.
+ * @return MALI_TRUE if queue is empty, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_notification_queue_is_empty( _mali_osk_notification_queue_t *queue );
+#endif
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
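+
+/* An end-to-end sketch of the queue API; the type value 1, my_payload_t and
+ * queue are illustrative assumptions:
+ *
+ *   Sender:
+ *       _mali_osk_notification_t *n;
+ *       n = _mali_osk_notification_create( 1, sizeof(my_payload_t) );
+ *       if ( NULL != n )
+ *       {
+ *           ((my_payload_t *)n->result_buffer)->status = 0;
+ *           _mali_osk_notification_queue_send( queue, n );
+ *       }
+ *
+ *   Receiver (may sleep):
+ *       _mali_osk_notification_t *out = NULL;
+ *       if ( _MALI_OSK_ERR_OK == _mali_osk_notification_queue_receive( queue, &out ) )
+ *       {
+ *           (handle out->notification_type and out->result_buffer)
+ *           _mali_osk_notification_delete( out );
+ *       }
+ */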
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which is 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire );
+
+/** @brief Modify a timer
+ *
+ * Set the absolute time at which a timer will expire, and start it if it is
+ * stopped. If \a expiry_tick is in the past (determined by
+ * _mali_osk_time_after() ), the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at absolute time \a expiry_tick, at which point, the
+ * callback function will be invoked with the callback-specific data, as set
+ * by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param expiry_tick the \em absolute time in ticks at which this timer should
+ * trigger.
+ *
+ */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 expiry_tick);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the IRQ work-queue by the timer (with
+ * \ref _mali_osk_irq_schedulework()) may still run. The timer callback and IRQ
+ * bottom-half handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim );
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data );
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - a timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim );
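+
+/* A lifecycle sketch for the timer API above; my_watchdog_callback and core
+ * are hypothetical:
+ *
+ *   _mali_osk_timer_t *tim = _mali_osk_timer_init();
+ *   if ( NULL == tim ) return _MALI_OSK_ERR_NOMEM;
+ *
+ *   _mali_osk_timer_setcallback( tim, my_watchdog_callback, (void *)core );
+ *   _mali_osk_timer_add( tim, _mali_osk_time_mstoticks( 500 ) );
+ *   (on expiry, my_watchdog_callback( core ) runs, possibly in IRQ context)
+ *
+ *   _mali_osk_timer_del( tim );    stop, blocking on any running callback
+ *   _mali_osk_timer_term( tim );
+ */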
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time functions use the OS's representation of time, which is
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversions between tick time and milliseconds (ms) may not be loss-less,
+ * and are \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after tickb
+ *
+ * Some OSs handle tick 'rollover' specially, and so can be more robust against
+ * tick counters rolling-over. This function must therefore be called to
+ * determine if a time (in ticks) really occurs after another time (in ticks).
+ *
+ * @param ticka the first time, in OS ticks
+ * @param tickb the second time, in OS ticks
+ * @return non-zero if ticka represents a time that occurs after tickb.
+ * Zero otherwise.
+ */
+int _mali_osk_time_after( u32 ticka, u32 tickb );
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+u32 _mali_osk_time_mstoticks( u32 ms );
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms( u32 ticks );
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+u32 _mali_osk_time_tickcount( void );
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. At worst, the delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) more.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, it is necessarily implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay( u32 usecs );
+
+/** @brief Return the time in nanoseconds, since an arbitrary reference.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_time_get_ns( void );
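+
+/* A rollover-safe polling sketch built from the functions above; 'done' is a
+ * hypothetical completion condition:
+ *
+ *   u32 deadline = _mali_osk_time_tickcount() + _mali_osk_time_mstoticks( 100 );
+ *   while ( !done )
+ *   {
+ *       if ( _mali_osk_time_after( _mali_osk_time_tickcount(), deadline ) )
+ *       {
+ *           (timed out)
+ *           break;
+ *       }
+ *   }
+ */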
+
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit word to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz( u32 val );
+/** @} */ /* end group _mali_osk_math */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg( const char *fmt, ... );
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the calling process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets a breakpoint at the point where the function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "mali_osk_specific.h" /* include any per-os specifics */
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+ #error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+ #error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h
new file mode 100644
index 00000000000..28026ba1e6d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_bitops.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit( u32 bit, u32 *addr )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+ MALI_DEBUG_ASSERT( NULL != addr );
+
+ (*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit( u32 bit, u32 *addr )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+ MALI_DEBUG_ASSERT( NULL != addr );
+
+ (*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit( u32 bit, u32 value )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+	return value & (1u << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
+{
+ u32 inverted;
+ u32 negated;
+ u32 isolated;
+ u32 leading_zeros;
+
+ /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+ inverted = ~value; /* zzz...z1000...0 */
+ /* Using count_trailing_zeros on inverted value -
+ * See ARM System Developers Guide for details of count_trailing_zeros */
+
+	/* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+ negated = (u32)-inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+ /* negated = xxx...x1000...0 */
+
+ isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+	/* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+ * Note that the output is zero if value was all 1s */
+
+ leading_zeros = _mali_osk_clz( isolated );
+
+ return 31 - leading_zeros;
+}
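+
+/* Illustrative walk-through of the isolation trick above (not part of the
+ * API). For value == 0x0000000F the first zero bit is bit 4:
+ *
+ *   inverted = ~0x0000000F             == 0xFFFFFFF0
+ *   negated  = -0xFFFFFFF0 (mod 2^32)  == 0x00000010
+ *   isolated = 0x00000010 & 0xFFFFFFF0 == 0x00000010
+ *   _mali_osk_clz( 0x00000010 ) == 27, and 31 - 27 == 4
+ *
+ * If value is all 1s then isolated == 0; assuming _mali_osk_clz( 0 ) == 32,
+ * the function returns -1 to signal that no zero bit was found.
+ */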
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ _mali_internal_clear_bit( nr, addr );
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ _mali_internal_set_bit( nr, addr );
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ return _mali_internal_test_bit( nr, *addr );
+}
+
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit( const u32 *addr, u32 maxbit )
+{
+ u32 total;
+
+ for ( total = 0; total < maxbit; total += 32, ++addr )
+ {
+ int result;
+ result = _mali_internal_find_first_zero_bit( *addr );
+
+ /* non-negative signifies the bit was found */
+ if ( result >= 0 )
+ {
+ total += (u32)result;
+ break;
+ }
+ }
+
+ /* Now check if we reached maxbit or above */
+ if ( total >= maxbit )
+ {
+ total = maxbit;
+ }
+
+ return total; /* either the found bit nr, or maxbit if not found */
+}
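+
+/* A minimal usage sketch of the bit-operations above, assuming a caller-owned
+ * bitmap of 64 bits (the names 'bitmap' and 'free_nr' are illustrative only):
+ *
+ * @code
+ * u32 bitmap[2] = { 0, 0 };                    // 64 bits, all clear
+ * u32 free_nr;
+ *
+ * _mali_osk_set_nonatomic_bit( 33, bitmap );   // sets bit 1 of bitmap[1]
+ * free_nr = _mali_osk_find_first_zero_bit( bitmap, 64 ); // == 0
+ * if ( 0 != _mali_osk_test_bit( 33, bitmap ) )
+ * {
+ *     _mali_osk_clear_nonatomic_bit( 33, bitmap );
+ * }
+ * @endcode
+ *
+ * These operations are non-atomic: guard such a bitmap with a lock if it can
+ * be accessed concurrently.
+ */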
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h
new file mode 100644
index 00000000000..a8d15f2a107
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_list.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = new_entry;
+ new_entry->next = next;
+ new_entry->prev = prev;
+ prev->next = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init( _mali_osk_list_t *list )
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del( _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be re-initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit( _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(list->prev, list->next);
+ _mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE int _mali_osk_list_empty( _mali_osk_list_t *list )
+{
+ return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move_entry the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move( _mali_osk_list_t *move_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(move_entry->prev, move_entry->next);
+ _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Join two lists
+ *
+ * Both lists must be initialized.
+ *
+ * Allows a list to be joined into another list at a specific location.
+ *
+ * @param list the list to add
+ * @param at the location in a list to add the new list into
+ */
+MALI_STATIC_INLINE void _mali_osk_list_splice( _mali_osk_list_t *list, _mali_osk_list_t *at )
+{
+ if (!_mali_osk_list_empty(list))
+ {
+ /* insert all items from 'list' after 'at' */
+ _mali_osk_list_t *first = list->next;
+ _mali_osk_list_t *last = list->prev;
+ _mali_osk_list_t *split = at->next;
+
+ first->prev = at;
+ at->next = first;
+
+ last->next = split;
+ split->prev = last;
+ }
+}
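+
+/* A minimal usage sketch of the list operations above, assuming an embedding
+ * structure of the caller's own design (the 'my_job' type is illustrative
+ * only):
+ *
+ * @code
+ * typedef struct my_job
+ * {
+ *     _mali_osk_list_t list; // embedded link
+ *     u32 id;
+ * } my_job;
+ *
+ * _mali_osk_list_t queue;
+ * my_job job;
+ *
+ * _mali_osk_list_init( &queue );               // empty: queue.next == &queue
+ * _mali_osk_list_init( &job.list );
+ * _mali_osk_list_addtail( &job.list, &queue ); // enqueue at the tail
+ * if ( !_mali_osk_list_empty( &queue ) )
+ * {
+ *     _mali_osk_list_delinit( queue.next );    // dequeue head, ready for reuse
+ * }
+ * @endcode
+ */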
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h
new file mode 100644
index 00000000000..9a046fb91eb
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_osk_mali.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Initialize the OSK layer
+ *
+ * This function is used to setup any initialization of OSK functionality, if
+ * required.
+ *
+ * This must be the first function called from the common code, specifically,
+ * from the common code entry-point, mali_kernel_constructor.
+ *
+ * The OS-integration into the OS's kernel must handle calling of
+ * mali_kernel_constructor when the device driver is loaded.
+ *
+ * @return _MALI_OSK_ERR_OK on success, or a suitable _mali_osk_errcode_t on
+ * failure.
+ */
+_mali_osk_errcode_t _mali_osk_init( void );
+
+/** @brief Terminate the OSK layer
+ *
+ * This function is used to terminate any resources initialized by
+ * _mali_osk_init.
+ *
+ * This must be the last function called from the common code, specifically,
+ * from the common code closedown function, mali_kernel_destructor, and the
+ * error path in mali_kernel_constructor.
+ *
+ * The OS-integration into the OS's kernel must handle calling of
+ * mali_kernel_destructor when the device driver is terminated.
+ */
+void _mali_osk_term( void );
+
+/** @brief Read the Mali Resource configuration
+ *
+ * Populates a _mali_osk_resource_t array from configuration settings, which
+ * are stored in an OS-specific way.
+ *
+ * For example, these may be compiled in to a static structure, or read from
+ * the filesystem at startup.
+ *
+ * On failure, do not call _mali_osk_resources_term.
+ *
+ * @param arch_config a pointer to where the pointer to the resources is stored
+ * @param num_resources the number of resources read
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_NOMEM on allocation
+ * error. For other failures, a suitable _mali_osk_errcode_t is returned.
+ */
+_mali_osk_errcode_t _mali_osk_resources_init( _mali_osk_resource_t **arch_config, u32 *num_resources );
+
+/** @brief Free resources allocated by _mali_osk_resources_init.
+ *
+ * Frees the _mali_osk_resource_t array allocated by _mali_osk_resources_init
+ *
+ * @param arch_config a pointer to the stored pointer to the resources
+ * @param num_resources the number of resources in the array
+ */
+void _mali_osk_resources_term( _mali_osk_resource_t **arch_config, u32 num_resources);
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Initialize a user-space accessible memory range
+ *
+ * This initializes a virtual address range such that it is reserved for the
+ * current process, but does not map any physical pages into this range.
+ *
+ * This function may initialize or adjust any members of the
+ * mali_memory_allocation \a descriptor supplied, before the physical pages are
+ * mapped in with _mali_osk_mem_mapregion_map().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion.
+ *
+ * The \a descriptor's process_addr_mapping_info member can be modified to
+ * allocate OS-specific information. Note that on input, this will be a
+ * ukk_private word from the U/K interface, as inserted by _mali_ukk_mem_mmap().
+ * This is used to pass information from the U/K interface to the OSK interface,
+ * if necessary. The precise usage of the process_addr_mapping_info member
+ * depends on the U/K implementation of _mali_ukk_mem_mmap().
+ *
+ * Therefore, the U/K implementation of _mali_ukk_mem_mmap() and the OSK
+ * implementation of _mali_osk_mem_mapregion_init() must agree on the meaning and
+ * usage of the ukk_private word and process_addr_mapping_info member.
+ *
+ * Refer to \ref u_k_api for more information on the U/K interface.
+ *
+ * On successful return, \a descriptor's mapping member will be correct for
+ * use with _mali_osk_mem_mapregion_term() and _mali_osk_mem_mapregion_map().
+ *
+ * @param descriptor the mali_memory_allocation to initialize.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor );
+
+/** @brief Terminate a user-space accessible memory range
+ *
+ * This terminates a virtual address range reserved in the current user process,
+ * where none, some or all of the virtual address ranges have mappings to
+ * physical pages.
+ *
+ * It will unmap any physical pages that had been mapped into a reserved
+ * virtual address range for the current process, and then releases the virtual
+ * address range. Any extra book-keeping information or resources allocated
+ * during _mali_osk_mem_mapregion_init() will also be released.
+ *
+ * The \a descriptor itself is not freed - this must be handled by the caller of
+ * _mali_osk_mem_mapregion_term().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, descriptor->flags bits are reserved for
+ * future expansion.
+ *
+ * @param descriptor the mali_memory_allocation to terminate.
+ */
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor );
+
+/** @brief Map physical pages into a user process's virtual address range
+ *
+ * This is used to map a number of physically contiguous pages into a
+ * user-process's virtual address range, which was previously reserved by a
+ * call to _mali_osk_mem_mapregion_init().
+ *
+ * This need not provide a mapping for the entire virtual address range
+ * reserved for \a descriptor - it may be used to map single pages per call.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion.
+ *
+ * The function may supply \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC.
+ * In this case, \a size must be set to \ref _MALI_OSK_CPU_PAGE_SIZE, and the function
+ * will allocate the physical page itself. The physical address of the
+ * allocated page will be returned through \a phys_addr.
+ *
+ * It is an error to set \a size != \ref _MALI_OSK_CPU_PAGE_SIZE while
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC,
+ * since it is not always possible for OSs to support such a setting through this
+ * interface.
+ *
+ * @note \b IMPORTANT: This code must validate the input parameters. If the
+ * range defined by \a offset and \a size is outside the range allocated in
+ * \a descriptor, then this function \b MUST not attempt any mapping, and must
+ * instead return a suitable \ref _mali_osk_errcode_t \b failure code.
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor, and not the \a phys_addr parameter.
+ * It must be a multiple of \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in,out] phys_addr a pointer to the physical base address to begin the
+ * mapping from. If \a size == \ref _MALI_OSK_CPU_PAGE_SIZE and
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, then this
+ * function will allocate the physical page itself, and return the
+ * physical address of the page through \a phys_addr, which will be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE. Otherwise, \a *phys_addr must be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE, and is unmodified after the call.
+ * \a phys_addr is unaffected by the \a offset parameter.
+ *
+ * @param[in] size the number of bytes to map in. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise a _mali_osk_errcode_t value
+ * on failure
+ *
+ * @note could expand to use _mali_osk_mem_mapregion_flags_t instead of
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, but note that we must
+ * also modify the mali process address manager in the mmu/memory engine code.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size );
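+
+/* A sketch of the range validation that an implementation of
+ * _mali_osk_mem_mapregion_map() must perform before attempting any mapping.
+ * It assumes the reserved range length is available through a 'size' member
+ * of mali_memory_allocation (illustrative only; the real checks live in each
+ * OS-specific implementation):
+ *
+ * @code
+ * if ( 0 != (offset % _MALI_OSK_CPU_PAGE_SIZE)
+ *      || 0 != (size % _MALI_OSK_CPU_PAGE_SIZE)
+ *      || offset > descriptor->size
+ *      || size > descriptor->size - offset )
+ * {
+ *     return _MALI_OSK_ERR_INVALID_ARGS; // a suitable failure code; no mapping attempted
+ * }
+ * @endcode
+ */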
+
+
+/** @brief Unmap physical pages from a user process's virtual address range
+ *
+ * This is used to unmap a number of physically contiguous pages from a
+ * user-process's virtual address range, which were previously mapped by a
+ * call to _mali_osk_mem_mapregion_map(). If the range specified was allocated
+ * from OS memory, then that memory will be returned to the OS. Whilst pages
+ * will be mapped out, the Virtual address range remains reserved, and at the
+ * same base address.
+ *
+ * When this function is used to unmap pages from OS memory
+ * (_mali_osk_mem_mapregion_map() was called with *phys_addr ==
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC), then the \a flags must
+ * include \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR. This is because
+ * it is not always easy for an OS implementation to discover whether the
+ * memory was OS allocated or not (and so, how it should release the memory).
+ *
+ * For this reason, only a range of pages of the same allocation type (all OS
+ * allocated, or none OS allocated) may be unmapped in one call. Multiple
+ * calls must be made if allocations of these different types exist across the
+ * entire region described by the \a descriptor.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion.
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to unmap from.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor. \a offset must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] size the number of bytes to unmap. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] flags specifies how the memory should be unmapped. For a range
+ * of pages that were originally OS allocated, this must have
+ * \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR set.
+ */
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags );
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h
new file mode 100644
index 00000000000..9a1583658c9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_uk_types.h
@@ -0,0 +1,1148 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/*
+ * NOTE: Because this file can be included from user-side and kernel-side,
+ * it is up to the including file to ensure certain typedefs (e.g. u32) are
+ * already defined when #including this.
+ */
+#include "regs/mali_200_regs.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum
+{
+ _MALI_UK_CORE_SUBSYSTEM, /**< Core Group of U/K calls */
+ _MALI_UK_MEMORY_SUBSYSTEM, /**< Memory Group of U/K calls */
+ _MALI_UK_PP_SUBSYSTEM, /**< Fragment Processor Group of U/K calls */
+ _MALI_UK_GP_SUBSYSTEM, /**< Vertex Processor Group of U/K calls */
+ _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+ _MALI_UK_PMM_SUBSYSTEM, /**< Power Management Module Group of U/K calls */
+ _MALI_UK_VSYNC_SUBSYSTEM, /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum
+{
+ /** Core functions */
+
+ _MALI_UK_OPEN = 0, /**< _mali_ukk_open() */
+ _MALI_UK_CLOSE, /**< _mali_ukk_close() */
+ _MALI_UK_GET_SYSTEM_INFO_SIZE, /**< _mali_ukk_get_system_info_size() */
+ _MALI_UK_GET_SYSTEM_INFO, /**< _mali_ukk_get_system_info() */
+ _MALI_UK_WAIT_FOR_NOTIFICATION, /**< _mali_ukk_wait_for_notification() */
+ _MALI_UK_GET_API_VERSION, /**< _mali_ukk_get_api_version() */
+ _MALI_UK_POST_NOTIFICATION, /**< _mali_ukk_post_notification() */
+
+ /** Memory functions */
+
+ _MALI_UK_INIT_MEM = 0, /**< _mali_ukk_init_mem() */
+ _MALI_UK_TERM_MEM, /**< _mali_ukk_term_mem() */
+ _MALI_UK_GET_BIG_BLOCK, /**< _mali_ukk_get_big_block() */
+ _MALI_UK_FREE_BIG_BLOCK, /**< _mali_ukk_free_big_block() */
+ _MALI_UK_MAP_MEM, /**< _mali_ukk_mem_mmap() */
+ _MALI_UK_UNMAP_MEM, /**< _mali_ukk_mem_munmap() */
+ _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+ _MALI_UK_DUMP_MMU_PAGE_TABLE, /**< _mali_ukk_mem_dump_mmu_page_table() */
+ _MALI_UK_ATTACH_UMP_MEM, /**< _mali_ukk_attach_ump_mem() */
+ _MALI_UK_RELEASE_UMP_MEM, /**< _mali_ukk_release_ump_mem() */
+ _MALI_UK_MAP_EXT_MEM, /**< _mali_uku_map_external_mem() */
+ _MALI_UK_UNMAP_EXT_MEM, /**< _mali_uku_unmap_external_mem() */
+ _MALI_UK_VA_TO_MALI_PA, /**< _mali_uku_va_to_mali_pa() */
+
+ /** Common functions for each core */
+
+ _MALI_UK_START_JOB = 0, /**< Start a Fragment/Vertex Processor Job on a core */
+ _MALI_UK_ABORT_JOB, /**< Abort a job */
+ _MALI_UK_GET_NUMBER_OF_CORES, /**< Get the number of Fragment/Vertex Processor cores */
+ _MALI_UK_GET_CORE_VERSION, /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+ /** Fragment Processor Functions */
+
+ _MALI_UK_PP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_pp_start_job() */
+ _MALI_UK_PP_ABORT_JOB = _MALI_UK_ABORT_JOB, /**< _mali_ukk_pp_abort_job() */
+ _MALI_UK_GET_PP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_pp_number_of_cores() */
+ _MALI_UK_GET_PP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_pp_core_version() */
+
+ /** Vertex Processor Functions */
+
+ _MALI_UK_GP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_gp_start_job() */
+ _MALI_UK_GP_ABORT_JOB = _MALI_UK_ABORT_JOB, /**< _mali_ukk_gp_abort_job() */
+ _MALI_UK_GET_GP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_gp_number_of_cores() */
+ _MALI_UK_GET_GP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_gp_core_version() */
+ _MALI_UK_GP_SUSPEND_RESPONSE, /**< _mali_ukk_gp_suspend_response() */
+
+ /** Profiling functions */
+
+ _MALI_UK_PROFILING_START = 0, /**< __mali_uku_profiling_start() */
+ _MALI_UK_PROFILING_ADD_EVENT, /**< __mali_uku_profiling_add_event() */
+ _MALI_UK_PROFILING_STOP, /**< __mali_uku_profiling_stop() */
+ _MALI_UK_PROFILING_GET_EVENT, /**< __mali_uku_profiling_get_event() */
+ _MALI_UK_PROFILING_CLEAR, /**< __mali_uku_profiling_clear() */
+
+#if USING_MALI_PMM
+ /** Power Management Module Functions */
+ _MALI_UK_PMM_EVENT_MESSAGE = 0, /**< Raise an event message */
+#endif
+
+	/** VSYNC reporting functions */
+ _MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */
+
+} _mali_uk_functions;
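+
+/* For illustration only: one possible way to combine the ordered pair
+ * (subsystem, function) into a single 32-bit identifier, in the same spirit
+ * as the notification encoding further below (this macro is hypothetical and
+ * not part of the interface):
+ *
+ * @code
+ * #define MY_UK_CALL_ID(subsys, func) ((u32)(((subsys) << 16) | (func)))
+ * // e.g. MY_UK_CALL_ID(_MALI_UK_CORE_SUBSYSTEM, _MALI_UK_GET_API_VERSION)
+ * @endcode
+ */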
+
+/** @brief Get the size necessary for system info
+ *
+ * @see _mali_ukk_get_system_info_size()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of buffer necessary to hold system information data, in bytes */
+} _mali_uk_get_system_info_size_s;
+
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
+/**
+ * Enum values for the different modes the driver can be put in.
+ * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
+ * Job completion is reported using the _mali_ukk_wait_for_notification call.
+ * The driver blocks this call until a job has completed, failed, or a timeout occurs.
+ *
+ * The 'raw' mode is reserved for future expansion.
+ */
+typedef enum _mali_driver_mode
+{
+ _MALI_DRIVER_MODE_RAW = 1, /**< Reserved for future expansion */
+ _MALI_DRIVER_MODE_NORMAL = 2 /**< Normal mode of operation */
+} _mali_driver_mode;
+
+/** @brief List of possible cores
+ *
+ * add new entries to the end of this enum */
+typedef enum _mali_core_type
+{
+ _MALI_GP2 = 2, /**< MaliGP2 Programmable Vertex Processor */
+ _MALI_200 = 5, /**< Mali200 Programmable Fragment Processor */
+ _MALI_400_GP = 6, /**< Mali400 Programmable Vertex Processor */
+ _MALI_400_PP = 7, /**< Mali400 Programmable Fragment Processor */
+ /* insert new core here, do NOT alter the existing values */
+} _mali_core_type;
+
+/** @brief Information about each Mali Core
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Both Fragment Processor (PP) and Vertex Processor (GP) cores are represented
+ * by this struct.
+ *
+ * The type is reported by the type member, which holds a \ref _mali_core_type
+ * value.
+ *
+ * Each core is given a unique sequence number identifying it, the core_nr
+ * member.
+ *
+ * Flags are taken directly from the resource's flags, and are currently unused.
+ *
+ * Multiple mali_core_info structs are linked in a single linked list using the next field
+ */
+typedef struct _mali_core_info
+{
+ _mali_core_type type; /**< Type of core */
+ _mali_core_version version; /**< Core Version, as reported by the Core's Version Register */
+ u32 reg_address; /**< Address of Registers */
+ u32 core_nr; /**< Sequence number */
+ u32 flags; /**< Flags. Currently Unused. */
+ struct _mali_core_info * next; /**< Next core in Linked List */
+} _mali_core_info;
+
+/** @brief Capabilities of Memory Banks
+ *
+ * These may be used to restrict memory banks for certain uses. They may be
+ * used when access is not possible (e.g. Bus does not support access to it)
+ * or when access is possible but not desired (e.g. Access is slow).
+ *
+ * In the case of 'possible but not desired', there is no way of specifying
+ * the flags as an optimization hint, so that the memory could be used as a
+ * last resort.
+ *
+ * @see _mali_mem_info
+ */
+typedef enum _mali_bus_usage
+{
+
+	_MALI_PP_READABLE   = (1<<0),	/**< Readable by the Fragment Processor */
+	_MALI_PP_WRITEABLE  = (1<<1),	/**< Writeable by the Fragment Processor */
+	_MALI_GP_READABLE   = (1<<2),	/**< Readable by the Vertex Processor */
+	_MALI_GP_WRITEABLE  = (1<<3),	/**< Writeable by the Vertex Processor */
+	_MALI_CPU_READABLE  = (1<<4),	/**< Readable by the CPU */
+	_MALI_CPU_WRITEABLE = (1<<5),	/**< Writeable by the CPU */
+	_MALI_MMU_READABLE  = _MALI_PP_READABLE | _MALI_GP_READABLE,	/**< Readable by the MMU (including all cores behind it) */
+	_MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE,	/**< Writeable by the MMU (including all cores behind it) */
+} _mali_bus_usage;
+
+/** @brief Information about the Mali Memory system
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Each element of the linked list describes a single Mali Memory bank.
+ * Each allocation can only come from one bank, and will not cross multiple
+ * banks.
+ *
+ * Each bank is uniquely identified by its identifier member. On Mali-nonMMU
+ * systems, to allocate from this bank, the value of identifier must be passed
+ * as the type_id member of the _mali_uk_get_big_block_s arguments to
+ * _mali_ukk_get_big_block.
+ *
+ * On Mali-MMU systems, there is only one bank, which describes the maximum
+ * possible address range that could be allocated (which may be much less than
+ * the available physical memory)
+ *
+ * The flags member describes the capabilities of the memory. It is an error
+ * to attempt to build a job for a particular core (PP or GP) when the memory
+ * regions used do not have the capabilities for supporting that core. This
+ * would result in a job abort from the Device Driver.
+ *
+ * For example, it is correct to build a PP job where read-only data structures
+ * are taken from a memory with _MALI_PP_READABLE set and
+ * _MALI_PP_WRITEABLE clear, and a framebuffer with _MALI_PP_WRITEABLE set and
+ * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
+ * where _MALI_PP_WRITEABLE is clear.
+ */
+typedef struct _mali_mem_info
+{
+ u32 size; /**< Size of the memory bank in bytes */
+	_mali_bus_usage flags; /**< Capability flags of the memory */
+ u32 maximum_order_supported; /**< log2 supported size */
+ u32 identifier; /**< Unique identifier, to be used in allocate calls */
+ struct _mali_mem_info * next; /**< Next List Link */
+} _mali_mem_info;
+
+/** @brief Info about the whole Mali system.
+ *
+ * This Contains a linked list of the cores and memory banks available. Each
+ * list pointer will remain inside the system_info buffer supplied in the
+ * _mali_uk_get_system_info_s arguments to a _mali_ukk_get_system_info call.
+ *
+ * The has_mmu member must be inspected to ensure the correct group of
+ * Memory function calls is obtained - that is, those for either Mali-MMU
+ * or Mali-nonMMU. @see _mali_uk_memory
+ */
+typedef struct _mali_system_info
+{
+ _mali_core_info * core_info; /**< List of _mali_core_info structures */
+ _mali_mem_info * mem_info; /**< List of _mali_mem_info structures */
+ u32 has_mmu; /**< Non-zero if Mali-MMU present. Zero otherwise. */
+ _mali_driver_mode drivermode; /**< Reserved. Must always be _MALI_DRIVER_MODE_NORMAL */
+} _mali_system_info;
+
+/** @brief Arguments to _mali_ukk_get_system_info()
+ *
+ * A buffer of the size returned by _mali_ukk_get_system_info_size() must be
+ * allocated, and the pointer to this buffer must be written into the
+ * system_info member. The buffer must be suitably aligned for storage of
+ * the _mali_system_info structure - for example, one returned by
+ * _mali_osk_malloc(), which will be suitably aligned for any structure.
+ *
+ * The ukk_private member must be set to zero by the user-side. Under an OS
+ * implementation, the U/K interface must write in the user-side base address
+ * into the ukk_private member, so that the common code in
+ * _mali_ukk_get_system_info() can determine how to adjust the pointers such
+ * that they are sensible from user space. Leaving ukk_private as zero implies
+ * that no pointer adjustment is necessary - which will be the case on a
+ * bare-metal/RTOS system.
+ *
+ * @see _mali_system_info
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer provided to store system information data */
+ _mali_system_info * system_info; /**< [in,out] pointer to buffer to store system information data. No initialisation of buffer required on input. */
+ u32 ukk_private; /**< [in] Kernel-side private word inserted by certain U/K interface implementations. Caller must set to Zero. */
+} _mali_uk_get_system_info_s;
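+
+/* A minimal sketch of the two-call pattern described above, assuming a valid
+ * user-kernel context in 'ctx' (error handling omitted for brevity):
+ *
+ * @code
+ * _mali_uk_get_system_info_size_s size_args = { ctx };
+ * _mali_uk_get_system_info_s info_args;
+ *
+ * _mali_ukk_get_system_info_size( &size_args );
+ *
+ * info_args.ctx = ctx;
+ * info_args.size = size_args.size;
+ * info_args.system_info = _mali_osk_malloc( size_args.size ); // suitably aligned
+ * info_args.ukk_private = 0; // must be zero on the user side
+ * _mali_ukk_get_system_info( &info_args );
+ * @endcode
+ */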
+/** @} */ /* end group _mali_uk_getsysteminfo */
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_gp_suspend_response()
+ *
+ * When _mali_ukk_wait_for_notification() receives notification that a
+ * Vertex Processor job was suspended, you need to send a response to indicate
+ * what needs to happen with this job. You can either abort or resume the job.
+ *
+ * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
+ * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
+ * for the job, which will resolve its out of memory condition.
+ * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
+ * this is an identifier for the suspended job
+ * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
+ * you resume it, @c arguments[0] should specify the Mali start address for the new
+ * heap and @c arguments[1] the Mali end address of the heap.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ */
+typedef enum _maligp_job_suspended_response_code
+{
+ _MALIGP_JOB_ABORT, /**< Abort the Vertex Processor job */
+ _MALIGP_JOB_RESUME_WITH_NEW_HEAP /**< Resume the Vertex Processor job with a new heap */
+} _maligp_job_suspended_response_code;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
+ _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
+ u32 arguments[2]; /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
+} _mali_uk_gp_suspend_response_s;
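+
+/* A minimal sketch of resuming a suspended job with a new heap, following the
+ * steps listed above ('suspended' is the _mali_uk_gp_job_suspended_s received
+ * in the notification; the heap addresses are placeholders):
+ *
+ * @code
+ * _mali_uk_gp_suspend_response_s response;
+ *
+ * response.ctx = ctx; // context returned from _mali_ukk_open()
+ * response.cookie = suspended.cookie;
+ * response.code = _MALIGP_JOB_RESUME_WITH_NEW_HEAP;
+ * response.arguments[0] = new_heap_start; // Mali start address of the new heap
+ * response.arguments[1] = new_heap_end;   // Mali end address of the new heap
+ * _mali_ukk_gp_suspend_response( &response );
+ * @endcode
+ */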
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
+/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
+typedef enum
+{
+ _MALI_UK_START_JOB_STARTED, /**< Job started */
+ _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED, /**< Job started and bumped a lower priority job that was pending execution */
+ _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE /**< Job could not be started at this time. Try starting the job again */
+} _mali_uk_start_job_status;
+
+/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job */
+typedef enum
+{
+ _MALI_UK_JOB_STATUS_END_SUCCESS = 1<<(16+0),
+ _MALI_UK_JOB_STATUS_END_OOM = 1<<(16+1),
+ _MALI_UK_JOB_STATUS_END_ABORT = 1<<(16+2),
+ _MALI_UK_JOB_STATUS_END_TIMEOUT_SW = 1<<(16+3),
+ _MALI_UK_JOB_STATUS_END_HANG = 1<<(16+4),
+ _MALI_UK_JOB_STATUS_END_SEG_FAULT = 1<<(16+5),
+ _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB = 1<<(16+6),
+ _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR = 1<<(16+7),
+ _MALI_UK_JOB_STATUS_END_SHUTDOWN = 1<<(16+8),
+ _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+} _mali_uk_job_status;
+
+#define MALIGP2_NUM_REGS_FRAME (6)
+
+/** @brief Arguments for _mali_ukk_gp_start_job()
+ *
+ * To start a Vertex Processor job
+ * - associate the request with a reference to a @c mali_gp_job_info by setting
+ * user_job_ptr to the address of the @c mali_gp_job_info of the job.
+ * - set @c priority to the priority of the @c mali_gp_job_info
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again. If the job had a higher priority than the one currently pending
+ * execution (@c _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED), it will bump
+ * the lower priority job and returns the address of the @c mali_gp_job_info
+ * for that job in @c returned_user_job_ptr. That job should get requeued.
+ *
+ * After the job has started, @c _mali_ukk_wait_for_notification() will be notified
+ * that the job finished or got suspended. It may get suspended due to
+ * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
+ * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
+ * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
+ * result.
+ *
+ * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished succesfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shuts down.
+ *
+ * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
+ * the @c user_job_ptr identifier used to start the job with, the @c reason
+ * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie
+ * to identify the core on which the job stalled. This @c cookie will be needed
+ * when responding to this notification by means of _mali_ukk_gp_suspend_response().
+ * (see _mali_ukk_gp_suspend_response()). The response is either to abort or
+ * resume the job. If the job got suspended due to an out of memory condition
+ * you may be able to resolve this by providing more memory and resuming the job.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 watchdog_msecs; /**< [in] maximum allowed runtime in milliseconds. The job gets killed if it runs longer than this. A value of 0 selects the default used by the device driver. */
+ u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 returned_user_job_ptr; /**< [out] identifier for the returned job in user space, a @c mali_gp_job_info* */
+ _mali_uk_start_job_status status; /**< [out] indicates job start status (success, previous job returned, requeue) */
+ u32 abort_id; /**< [in] abort id of this job, used to identify this job for later abort requests */
+	u32 perf_counter_l2_src0; /**< [in] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [in] source id for Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_gp_start_job_s;
+
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE (1<<2) /**< Enable performance counter L2_SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE (1<<3) /**< Enable performance counter L2_SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET (1<<4) /**< Enable performance counter L2_RESET for a job */
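+
+/* For illustration: enabling both per-job performance counters in a
+ * _mali_uk_gp_start_job_s request ('job_args' and the zero source ids are
+ * placeholders; see ARM DDI0415A, Table 3-60 for real source id values):
+ *
+ * @code
+ * job_args.perf_counter_flag = _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE
+ *                            | _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE;
+ * job_args.perf_counter_src0 = 0; // placeholder source id
+ * job_args.perf_counter_src1 = 0; // placeholder source id
+ * @endcode
+ */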
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 irq_status; /**< [out] value of the GP interrupt rawstat register (see ARM DDI0415A) */
+ u32 status_reg_on_stop; /**< [out] value of the GP control register */
+	u32 vscl_stop_addr; /**< [out] value of the GP VSCL start register */
+ u32 plbcl_stop_addr; /**< [out] value of the GP PLBCL start register */
+ u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */
+ u32 perf_counter_src0; /**< [out] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [out] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+	u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+	u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+ u32 render_time; /**< [out] number of milliseconds it took for the job to render */
+	u32 perf_counter_l2_src0; /**< [out] source id for Mali-400 MP L2 cache performance counter 0 */
+	u32 perf_counter_l2_src1; /**< [out] source id for Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0; /**< [out] Value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1; /**< [out] Value of the Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_gp_job_finished_s;
+
+typedef enum _maligp_job_suspended_reason
+{
+ _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY /**< Polygon list builder unit (PLBU) has run out of memory */
+} _maligp_job_suspended_reason;
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _maligp_job_suspended_reason reason; /**< [out] reason why the job stalled */
+ u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */
+} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_pp_start_job()
+ *
+ * To start a Fragment Processor job
+ * - associate the request with a reference to a mali_pp_job by setting
+ * @c user_job_ptr to the address of the @c mali_pp_job of the job.
+ * - set @c priority to the priority of the mali_pp_job
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
+ * For MALI200 you also need to copy the write back 0,1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
+ *
+ * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again. If the job had a higher priority than the one currently pending
+ * execution (@c _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED), it will bump
+ * the lower priority job and returns the address of the @c mali_pp_job
+ * for that job in @c returned_user_job_ptr. That job should get requeued.
+ *
+ * After the job has started, _mali_ukk_wait_for_notification() will be notified
+ * when the job finished. The notification will contain a
+ * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
+ * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished succesfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than @c watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shuts down.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 watchdog_msecs; /**< [in] maximum allowed runtime in milliseconds. The job gets killed if it runs longer than this. A value of 0 selects the default used by the device driver. */
+ u32 frame_registers[MALI200_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job, see ARM DDI0415A */
+	u32 wb0_registers[MALI200_NUM_REGS_WBx]; /**< [in] write back unit 0 registers for this job (see ARM DDI0415A) */
+	u32 wb1_registers[MALI200_NUM_REGS_WBx]; /**< [in] write back unit 1 registers for this job (see ARM DDI0415A) */
+	u32 wb2_registers[MALI200_NUM_REGS_WBx]; /**< [in] write back unit 2 registers for this job (see ARM DDI0415A) */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 returned_user_job_ptr; /**< [out] identifier for the returned job in user space */
+ _mali_uk_start_job_status status; /**< [out] indicates job start status (success, previous job returned, requeue) */
+ u32 abort_id; /**< [in] abort id of this job, used to identify this job for later abort requests */
+	u32 perf_counter_l2_src0; /**< [in] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [in] source id for Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_pp_start_job_s;
+/** @} */ /* end group _mali_uk_ppstartjob_s */
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 irq_status; /**< [out] value of interrupt rawstat register (see ARM DDI0415A) */
+ u32 last_tile_list_addr; /**< [out] value of renderer list register (see ARM DDI0415A); necessary to restart a stopped job */
+ u32 perf_counter_src0; /**< [out] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [out] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+	u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+	u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+ u32 render_time; /**< [out] number of milliseconds it took for the job to render */
+	u32 perf_counter_l2_src0; /**< [out] source id for Mali-400 MP L2 cache performance counter 0 */
+	u32 perf_counter_l2_src1; /**< [out] source id for Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0; /**< [out] Value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1; /**< [out] Value of the Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0_raw; /**< [out] Raw value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1_raw; /**< [out] Raw value of the Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_pp_job_finished_s;
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
+/** @brief Notification type encodings
+ *
+ * Each Notification type is an ordered pair of (subsystem,id), and is unique.
+ *
+ * The encoding of subsystem,id into a 32-bit word is:
+ * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
+ * | (( id << _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
+ *
+ * @see _mali_uk_wait_for_notification_s
+ */
+typedef enum
+{
+ /** core notifications */
+
+ _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+ _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+
+ /** Fragment Processor notifications */
+
+ _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+
+ /** Vertex Processor notifications */
+
+ _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+} _mali_uk_notification_type;
+
+/** to assist in splitting up 32-bit notification value in subsystem and id value */
+#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000
+#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
+#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF
+#define _MALI_NOTIFICATION_ID_SHIFT 0
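+
+/* For illustration: splitting a notification type value back into its
+ * subsystem and id parts with the masks and shifts above (variable names are
+ * illustrative only):
+ *
+ * @code
+ * u32 subsystem = (type & _MALI_NOTIFICATION_SUBSYSTEM_MASK) >> _MALI_NOTIFICATION_SUBSYSTEM_SHIFT;
+ * u32 id        = (type & _MALI_NOTIFICATION_ID_MASK) >> _MALI_NOTIFICATION_ID_SHIFT;
+ * // e.g. _MALI_NOTIFICATION_GP_STALLED decodes to (_MALI_UK_GP_SUBSYSTEM, 0x20)
+ * @endcode
+ */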
+
+
+/** @brief Arguments for _mali_ukk_wait_for_notification()
+ *
+ * On successful return from _mali_ukk_wait_for_notification(), the members of
+ * this structure will indicate the reason for notification.
+ *
+ * Specifically, the source of the notification can be identified by the
+ * subsystem and id fields of the _mali_uk_notification_type in the type
+ * member. The type member is encoded in a way to divide up the types into a
+ * subsystem field, and a per-subsystem ID field. See
+ * _mali_uk_notification_type for more information.
+ *
+ * Interpreting the data union member depends on the notification type:
+ *
+ * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
+ * - The kernel side is shutting down. No further
+ * _mali_uk_wait_for_notification() calls should be made.
+ * - In this case, the value of the data union member is undefined.
+ * - This is used to indicate to the user space client that it should close
+ * the connection to the Mali Device Driver.
+ * - type == _MALI_NOTIFICATION_PP_FINISHED
+ * - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
+ * identifier used to start the job with, the job status, the number of milliseconds the job took to render,
+ * and values of core registers when the job finished (irq status, performance counters, renderer list
+ * address).
+ * - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED.
+ * - If the hardware detected a timeout while rendering the job, or software detected the job is
+ * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will
+ * indicate _MALI_UK_JOB_STATUS_HANG.
+ * - If the hardware detected a bus error while accessing memory associated with the job, status will
+ * indicate _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * - Status will indicate _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job but the job
+ * didn't start on the hardware yet, e.g. when the driver closes.
+ * - type == _MALI_NOTIFICATION_GP_FINISHED
+ * - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
+ * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
+ * The status values have the same meaning for type == _MALI_NOTIFICATION_PP_FINISHED.
+ * - type == _MALI_NOTIFICATION_GP_STALLED
+ * - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
+ * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on
+ * which the job stalled.
+ * - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY
+ * when the polygon list builder unit has run out of memory.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [out] Type of notification available */
+ union
+ {
+ _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
+ _mali_uk_gp_job_finished_s gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
+ _mali_uk_pp_job_finished_s pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
+ } data;
+} _mali_uk_wait_for_notification_s;
+
+/** @brief Arguments for _mali_ukk_post_notification()
+ *
+ * Posts the specified notification to the notification queue for this application.
+ * This is used to send a quit message to the callback thread.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [in] Type of notification to post */
+} _mali_uk_post_notification_s;
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
+/** helpers for Device Driver API version handling */
+
+/** @brief Encode a version ID from a 16-bit input
+ *
+ * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */
+#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+
+/** @brief Check whether a 32-bit value is likely to be Device Driver API
+ * version ID. */
+#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+
+/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
+ * ID */
+#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+
+/** @brief Determine whether two 32-bit encoded version IDs match */
+#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API.
+ * The version is a 16-bit integer incremented on each API change.
+ * The 16-bit integer is stored twice in a 32-bit integer.
+ * For example, for version 1 the value would be 0x00010001.
+ */
+#define _MALI_API_VERSION 8
+#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
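+
+/* Worked example (following the encoding above): with _MALI_API_VERSION == 8,
+ * _MAKE_VERSION_ID(8) yields (8 << 16) | 8 == 0x00080008, _IS_VERSION_ID(0x00080008)
+ * holds, and _GET_VERSION(0x00080008) == 8.
+ */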
+
+/**
+ * The API version is a 16-bit integer stored in both the lower and upper 16-bits
+ * of a 32-bit value. The 16-bit API version value is incremented on each API
+ * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
+ */
+typedef u32 _mali_uk_api_version;
+
+/** @brief Arguments for _mali_uk_get_api_version()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+ int compatible; /**< [out] @c 1 when @c version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_s;
+/** @} */ /* end group _mali_uk_getapiversion_s */
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_init_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mali_address_base; /**< [out] start of MALI address space */
+ u32 memory_size; /**< [out] total MALI address space available */
+} _mali_uk_init_mem_s;
+
+/** @brief Arguments for _mali_ukk_term_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_term_mem_s;
+
+/** @brief Arguments for _mali_ukk_get_big_block()
+ *
+ * - type_id should be set to the value of the identifier member of one of the
+ * _mali_mem_info structures returned through _mali_ukk_get_system_info()
+ * - ukk_private must be zero when calling from user-side. On Kernel-side, the
+ * OS implementation of the U/K interface can use it to communicate data to the
+ * OS implementation of the OSK layer. Specifically, ukk_private will be placed
+ * into the ukk_private member of the _mali_uk_mem_mmap_s structure. See
+ * _mali_ukk_mem_mmap() for more details.
+ * - minimum_size_requested will be updated if it is too small
+ * - block_size will always be >= minimum_size_requested, because the underlying
+ * allocation mechanism may only be able to divide up memory regions in certain
+ * ways. To avoid wasting memory, block_size should always be taken into account
+ * rather than assuming minimum_size_requested was really allocated.
+ * - to free the memory, the returned cookie member must be stored, and used to
+ * refer to it.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 type_id; /**< [in] the type id of the memory bank to allocate memory from */
+ u32 minimum_size_requested; /**< [in,out] minimum size of the allocation */
+ u32 ukk_private; /**< [in] Kernel-side private word inserted by certain U/K interface implementations. Caller must set to zero. */
+ u32 mali_address; /**< [out] address of the allocation in mali address space */
+ void *cpuptr; /**< [out] address of the allocation in the current process address space */
+ u32 block_size; /**< [out] size of the block that got allocated */
+ u32 flags; /**< [out] flags associated with the allocated block, of type _mali_bus_usage */
+ u32 cookie; /**< [out] identifier for the allocated block in kernel space */
+} _mali_uk_get_big_block_s;
+
+/** @brief Arguments for _mali_ukk_free_big_block()
+ *
+ * All that is required is that the cookie member must be set to the value of
+ * the cookie member returned through _mali_ukk_get_big_block()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_free_big_block_s;
+
+/** @note Mali-MMU only */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 phys_addr; /**< [in] physical address */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_map_external_mem_s;
+
+/** Flag for _mali_uk_map_external_mem_s and _mali_uk_attach_ump_mem_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+/** @note Mali-MMU only */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_unmap_external_mem_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, except that phys_addr is replaced by secure_id */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure id */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_attach_ump_mem_s;
+
+/** @note Mali-MMU only; will be supported in future version */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_release_ump_mem_s;
+
+/** @brief Arguments for _mali_ukk_va_to_mali_pa()
+ *
+ * If size is zero, or not a multiple of the system's page size, it will be
+ * rounded up to the next multiple of the page size. This occurs before any
+ * other use of the size parameter.
+ *
+ * If va is not PAGE_SIZE aligned, it will be rounded down to the nearest page
+ * boundary.
+ *
+ * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
+ * contiguity.
+ *
+ * The implementor will check that the entire physical range is allowed to be mapped
+ * into user-space.
+ *
+ * Failure will occur if either of the above are not satisfied.
+ *
+ * Otherwise, the physical base address of the range is returned through pa,
+ * va is updated to be page aligned, and size is updated to be a non-zero
+ * multiple of the system's page size.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *va; /**< [in,out] Virtual address of the start of the range */
+ u32 pa; /**< [out] Physical base address of the range */
+ u32 size; /**< [in,out] Size of the range, in bytes. */
+} _mali_uk_va_to_mali_pa_s;
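+
+/* Worked example of the rounding (a sketch, assuming PAGE_SIZE == 0x1000; see
+ * _mali_ukk_va_to_mali_pa() in mali_ukk.h for the full description): on input
+ * va == 0x1234 and size == 0x100, va is rounded down to 0x1000, the remainder
+ * is added to size (giving 0x334), and size is then rounded up to 0x1000, so
+ * the range 0x1000..0x1FFF inclusive is checked for physical contiguity.
+ */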
+
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer to receive mmu page table information */
+ void *buffer; /**< [in,out] buffer to receive mmu page table information */
+ u32 register_writes_size; /**< [out] size of MMU register dump */
+ u32 *register_writes; /**< [out] pointer within buffer where MMU register dump is stored */
+ u32 page_table_dump_size; /**< [out] size of MMU page table dump */
+ u32 *page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores
+ * will contain the number of Fragment Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Fragment Processor cores in the system */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_pp_core_version_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 abort_id; /**< [in] ID of job(s) to abort */
+} _mali_uk_pp_abort_job_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 abort_id; /**< [in] ID of job(s) to abort */
+} _mali_uk_gp_abort_job_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 limit; /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
+} _mali_uk_profiling_start_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 event_id; /**< [in] event id to register (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 count; /**< [out] The number of events sampled */
+} _mali_uk_profiling_stop_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 index; /**< [in] which index to get (starting at zero) */
+ u64 timestamp; /**< [out] timestamp of event */
+ u32 event_id; /**< [out] event id of event (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [out] event specific data */
+} _mali_uk_profiling_get_event_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_profiling_clear_s;
+
+
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or nonMMU:
+ * - in the nonMMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() calls _mali_ukk_mem_mmap() directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ u32 size; /**< [in] Size of the requested mapping */
+ u32 phys_addr; /**< [in] Physical address - could be offset, depending on caller+callee convention */
+ u32 cookie; /**< [out] Returns a cookie for use in munmap calls */
+ void *uku_private; /**< [in] User-side Private word used by U/K interface */
+ void *ukk_private; /**< [in] Kernel-side Private word used by U/K interface */
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be those returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] The mapping returned from mmap call */
+ u32 size; /**< [in] The size passed to mmap call */
+ u32 cookie; /**< [in] Cookie from mmap call */
+} _mali_uk_mem_munmap_s;
+/** @} */ /* end group _mali_uk_memory */
+
+#if USING_MALI_PMM
+
+/** @defgroup _mali_uk_pmm U/K Power Management Module
+ * @{ */
+
+/** @brief Power management event message identifiers.
+ *
+ * U/K events start after id 200, and can range up to 999
+ * Adding new events will require updates to the PMM mali_pmm_event_id type
+ */
+#define _MALI_PMM_EVENT_UK_EXAMPLE 201
+
+/** @brief Generic PMM message data type, which is dependent on the event message
+ */
+typedef u32 mali_pmm_message_data;
+
+
+/** @brief Arguments to _mali_ukk_pmm_event_message()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 id; /**< [in] event id */
+ mali_pmm_message_data data; /**< [in] specific data associated with the event */
+} _mali_uk_pmm_message_s;
+
+/** @} */ /* end group _mali_uk_pmm */
+#endif /* USING_MALI_PMM */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue on the next frame.
+ */
+typedef enum _mali_uk_vsync_event
+{
+ _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+ _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event()
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_vsync_event event; /**< [in] VSYNC event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h
new file mode 100644
index 00000000000..d066046901d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/mali_ukk.h
@@ -0,0 +1,710 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ * - The Device Driver has implemented the _mali_ukk set of functions
+ * - The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ * - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing. An encoding sketch follows this list.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ * return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function call)
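+ *
+ * As an illustration of such an encoding (a hypothetical sketch only; the
+ * 'm' magic number, the 5-bit shift and the use of _IOWR are assumptions,
+ * not this driver's actual scheme):@code
+ * #include <linux/ioctl.h>
+ * #define EXAMPLE_UK_IOCTL_NR(group, number) (((group) << 5) | (number))
+ * #define EXAMPLE_UK_IOCTL(group, number, argtype) \
+ *     _IOWR('m', EXAMPLE_UK_IOCTL_NR(group, number), argtype)
+ * @endcode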
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * The first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ * void *ctx;
+ * u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
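+ * A call then follows this pattern (a sketch; stored_context and cores are
+ * illustrative locals, and the stored handle is copied into ctx for every
+ * call because ctx is trashed on output):@code
+ * _mali_uk_get_gp_number_of_cores_s args;
+ * args.ctx = stored_context;
+ * if ( _MALI_OSK_ERR_OK == _mali_uku_get_gp_number_of_cores( &args ) )
+ * {
+ *     cores = args.number_of_cores;
+ * }
+ * @endcode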
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ * function. The argument structure is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ * interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ * - For example, U/K interface implementations that take NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ * - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ * - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ * meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ * - On OS systems (not including direct function call U/K interface
+ * implementations), _mali_ukk_get_big_block() may succeed, but the subsequent
+ * copying to user space may fail.
+ * - A problem scenario exists: some memory has been reserved by
+ * _mali_ukk_get_big_block(), but the user-mode will be unaware of it (it will
+ * never receive any information about this memory). In this case, the U/K
+ * implementation must do everything necessary to 'rollback' the \em atomic
+ * _mali_ukk_get_big_block() transaction.
+ * - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ * - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open( void **context );
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close( void **context );
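+
+/* A minimal sketch of the session lifetime (stored_context is an illustrative
+ * name). The handle is kept outside the U/K argument structures, because each
+ * call trashes the ctx member it is given:
+ *
+ * @code
+ * void *stored_context = NULL;
+ * if ( _MALI_OSK_ERR_OK == _mali_ukk_open( &stored_context ) )
+ * {
+ *     // ... copy stored_context into args.ctx for each U/K call ...
+ *     _mali_ukk_close( &stored_context );
+ * }
+ * @endcode
+ */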
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Returns the size of the buffer needed for a _mali_ukk_get_system_info call
+ *
+ * This function must be called before a call is made to
+ * _mali_ukk_get_system_info, so that memory of the correct size can be
+ * allocated, and a pointer to this memory written into the system_info member
+ * of _mali_uk_get_system_info_s.
+ *
+ * @param args see _mali_uk_get_system_info_size_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_system_info_size( _mali_uk_get_system_info_size_s *args );
+
+/** @brief Returns information about the system (cores and memory banks)
+ *
+ * A buffer for this needs to be allocated by the caller. The size of the buffer required is returned by
+ * _mali_ukk_get_system_info_size(). The user is responsible for freeing the buffer.
+ *
+ * The _mali_system_info structure will be written to the start of this buffer,
+ * and the core_info and mem_info lists will be written to locations inside
+ * the buffer, and will be suitably aligned.
+ *
+ * Under OS implementations of the U/K interface we need to pack/unpack
+ * pointers across the user/kernel boundary. This has required that we malloc()
+ * an intermediate buffer inside the kernel-side U/K interface, and free it
+ * before returning to user-side. To avoid modifying common code, we do the
+ * following pseudo-code, which we shall call 'pointer switching':
+ *
+ * @code
+ * {
+ * Copy_From_User(kargs, args, ... );
+ * void __user * local_ptr = kargs->system_info;
+ * kargs->system_info = _mali_osk_malloc( ... );
+ * _mali_ukk_get_system_info( kargs );
+ * Copy_To_User( local_ptr, kargs->system_info, ... );
+ * _mali_osk_free( kargs->system_info );
+ * }
+ * @endcode
+ * @note The user-side's args->system_info member is left unmodified here.
+ *
+ * However, the current implementation requires an extra ukk_private word so that the common code can work out
+ * how to patch pointers to user-mode for an OS's U/K implementation. It should be set to the user-space
+ * destination address for pointer-patching to occur. When NULL, it is unused, and no pointer-patching occurs in the
+ * common code.
+ *
+ * @param args see _mali_uk_get_system_info_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args );
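+
+/* A two-step query sketch (assumptions: a direct-call context; the size,
+ * system_info and ukk_private member names follow the descriptions above;
+ * error handling elided):
+ *
+ * @code
+ * _mali_uk_get_system_info_size_s size_args;
+ * size_args.ctx = stored_context;
+ * _mali_ukk_get_system_info_size( &size_args );
+ *
+ * _mali_uk_get_system_info_s info_args;
+ * info_args.ctx = stored_context;
+ * info_args.size = size_args.size;
+ * info_args.system_info = _mali_osk_malloc( size_args.size );
+ * info_args.ukk_private = 0; // no pointer-patching in a direct-call setup
+ * _mali_ukk_get_system_info( &info_args );
+ * _mali_osk_free( info_args.system_info );
+ * @endcode
+ */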
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args );
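+
+/* A minimal notification loop sketch (assumptions: a direct-call context and
+ * an illustrative stored_context; error handling elided):
+ *
+ * @code
+ * for (;;)
+ * {
+ *     _mali_uk_wait_for_notification_s n;
+ *     n.ctx = stored_context;
+ *     if ( _MALI_OSK_ERR_OK != _mali_ukk_wait_for_notification( &n ) ) break;
+ *     if ( _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS == n.type ) break;
+ *     if ( _MALI_NOTIFICATION_PP_FINISHED == n.type )
+ *     {
+ *         // n.data.pp_job_finished describes the completed job
+ *     }
+ * }
+ * @endcode
+ */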
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args );
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args );
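+
+/* A version-negotiation sketch (a direct-call context and an illustrative
+ * stored_context assumed):
+ *
+ * @code
+ * _mali_uk_get_api_version_s args;
+ * args.ctx = stored_context;
+ * args.version = _MALI_UK_API_VERSION; // caller's version, _MAKE_VERSION_ID encoded
+ * if ( _MALI_OSK_ERR_OK == _mali_ukk_get_api_version( &args ) && args.compatible )
+ * {
+ *     // args.version now holds the kernel-side API version
+ * }
+ * @endcode
+ */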
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/**
+ * @brief Initialize the Mali-MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called before any
+ * other functions in the \ref _mali_uk_memory group are called.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_init_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args );
+
+/**
+ * @brief Terminate the MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called when
+ * functions in the \ref _mali_uk_memory group will no longer be called. This
+ * function must be called before the application terminates.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_term_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args );
+
+/** @brief Map a block of memory into the current user process
+ *
+ * Allocates a minimum of minimum_size_requested bytes of MALI memory and maps it into the current
+ * process space. The number of bytes allocated is returned in args->block_size.
+ *
+ * This is only used for Mali-nonMMU mode.
+ *
+ * @param args see _mali_uk_get_big_block_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args );
+
+/** @brief Unmap a block of memory from the current user process
+ *
+ * Frees allocated MALI memory and unmaps it from the current process space. The previously allocated memory
+ * is indicated by the cookie as returned by _mali_ukk_get_big_block().
+ *
+ * This is only used for Mali-nonMMU mode.
+ *
+ * @param args see _mali_uk_free_big_block_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args );
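+
+/* A minimal nonMMU allocate/free sketch (assumptions: a direct-call context,
+ * a valid type_id from the system info, an illustrative stored_context;
+ * error handling elided):
+ *
+ * @code
+ * _mali_uk_get_big_block_s alloc;
+ * alloc.ctx = stored_context;
+ * alloc.type_id = type_id;
+ * alloc.minimum_size_requested = 4096;
+ * alloc.ukk_private = 0; // must be zero from user-side
+ * if ( _MALI_OSK_ERR_OK == _mali_ukk_get_big_block( &alloc ) )
+ * {
+ *     // use alloc.mali_address / alloc.cpuptr for alloc.block_size bytes
+ *     _mali_uk_free_big_block_s free_args;
+ *     free_args.ctx = stored_context;
+ *     free_args.cookie = alloc.cookie; // the stored cookie identifies the block
+ *     _mali_ukk_free_big_block( &free_args );
+ * }
+ * @endcode
+ */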
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode. In Mali-nonMMU mode, the function is callable
+ * from the kernel side, and is used to implement _mali_ukk_get_big_block() in this case.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependant on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, the _mali_uk_mem_mmap_s::phys_addr member passed to
+ * _mali_ukk_mem_mmap() is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * This means that in the first (nonMMU) case, the caller must manage the physical address allocations. The caller
+ * in this case is _mali_ukk_get_big_block(), which does indeed manage the Mali physical address ranges.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args );
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args );
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args );
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args );
+
+/** @brief Map a physically contiguous range of memory into Mali
+ * @param args see _mali_uk_map_external_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args );
+
+/** @brief Unmap a physically contiguous range of memory from Mali
+ * @param args see _mali_uk_unmap_external_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args );
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+/** @brief Map UMP memory into Mali
+ * @param args see _mali_uk_attach_ump_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args );
+/** @brief Unmap UMP memory from Mali
+ * @param args see _mali_uk_release_ump_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args );
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+/** @brief Determine virtual-to-physical mapping of a contiguous memory range
+ * (optional)
+ *
+ * This allows the user-side to do a virtual-to-physical address translation.
+ * In conjunction with _mali_uku_map_external_mem, this can be used to do
+ * direct rendering.
+ *
+ * This function will only succeed on a virtual range that is mapped into the
+ * current process, and that is contiguous.
+ *
+ * If va is not page-aligned, then it is rounded down to the next page
+ * boundary. The remainder is added to size, such that ((u32)va)+size before
+ * rounding is equal to ((u32)va)+size after rounding. The rounded modified
+ * va and size will be written out into args on success.
+ *
+ * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
+ * then size will be rounded up to the next multiple of PAGE_SIZE before
+ * translation occurs. The rounded up size will be written out into args on
+ * success.
+ *
+ * On most OSs, virtual-to-physical address translation is a privileged
+ * function. Therefore, the implementer must validate the range supplied, to
+ * ensure they are not providing arbitrary virtual-to-physical address
+ * translations. While it is unlikely such a mechanism could be used to
+ * compromise the security of a system on its own, it is possible it could be
+ * combined with another small security risk to cause a much larger security
+ * risk.
+ *
+ * @note This is an optional part of the interface, and is only used by certain
+ * implementations of libEGL. If the platform layer in your libEGL
+ * implementation does not require Virtual-to-Physical address translation,
+ * then this function need not be implemented. A stub implementation should not
+ * be required either, as it would only be removed by the compiler's dead code
+ * elimination.
+ *
+ * @note if implemented, this function is entirely platform-dependant, and does
+ * not exist in common code.
+ *
+ * @param args see _mali_uk_va_to_mali_pa_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_va_to_mali_pa( _mali_uk_va_to_mali_pa_s * args );
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * If an existing lower priority job is returned, args->returned_user_job_ptr contains a
+ * pointer to the returned job and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_pp_start_job_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job( _mali_uk_pp_start_job_s *args );
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args );
+
+/** @brief Abort any PP jobs with the given ID.
+ *
+ * Any jobs with the given ID, whether internally queued or currently running on the hardware,
+ * are to be stopped/aborted immediately. Aborted jobs are reported via the normal job
+ * completion (notification) system.
+ *
+ * @param args see _mali_uk_pp_abort_job_s in "mali_uk_types.h"
+ */
+void _mali_ukk_pp_abort_job( _mali_uk_pp_abort_job_s *args );
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * If an existing lower priority job is returned, args->returned_user_job_ptr contains a pointer to
+ * the returned job and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_gp_start_job_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job( _mali_uk_gp_start_job_s *args );
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_uk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args );
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args );
+
+/** @brief Abort any GP jobs with the given ID.
+ *
+ * Any jobs with the given ID, whether internally queued or currently running on the hardware,
+ * are to be stopped/aborted immediately. Aborted jobs are reported via the normal job
+ * completion (notification) system.
+ *
+ * @param args see _mali_uk_gp_abort_job_s in "mali_uk_types.h"
+ */
+void _mali_ukk_gp_abort_job( _mali_uk_gp_abort_job_s *args );
+/** @} */ /* end group _mali_uk_gp */
+
+#if USING_MALI_PMM
+/** @addtogroup _mali_uk_pmm U/K Power Management Module
+ * @{ */
+
+/** @brief Power Management Module event message
+ *
+ * @note The event message can fail to be sent due to OOM but this is
+ * stored in the PMM state machine to be handled later
+ *
+ * @param args see _mali_uk_pmm_event_message_s in "mali_uk_types.h"
+ */
+void _mali_ukk_pmm_event_message( _mali_uk_pmm_message_s *args );
+/** @} */ /* end group _mali_uk_pmm */
+#endif /* USING_MALI_PMM */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Start recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_start_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Stop recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_stop_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
+
+/** @brief Retrieve a recorded profiling event.
+ *
+ * @param args see _mali_uk_profiling_get_event_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
+
+/** @brief Clear recorded profiling events.
+ *
+ * @param args see _mali_uk_profiling_clear_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
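+
+/* A minimal profiling round-trip sketch (a direct-call context and an
+ * illustrative stored_context assumed; error handling elided):
+ *
+ * @code
+ * _mali_uk_profiling_start_s start;
+ * start.ctx = stored_context;
+ * start.limit = 1024;                    // desired event limit
+ * _mali_ukk_profiling_start( &start );   // start.limit holds the actual limit
+ *
+ * _mali_uk_profiling_stop_s stop;
+ * stop.ctx = stored_context;
+ * _mali_ukk_profiling_stop( &stop );     // stop.count = events recorded
+ *
+ * u32 i;
+ * for ( i = 0; i < stop.count; ++i )
+ * {
+ *     _mali_uk_profiling_get_event_s ev;
+ *     ev.ctx = stored_context;
+ *     ev.index = i;
+ *     _mali_ukk_profiling_get_event( &ev ); // ev.timestamp, ev.event_id, ev.data
+ * }
+ * @endcode
+ */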
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif /* MALI_TIMELINE_PROFILING_ENABLED */
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
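+
+/* Sketch: reporting a vsync wait interval (a direct-call context and an
+ * illustrative stored_context assumed):
+ *
+ * @code
+ * _mali_uk_vsync_event_report_s r;
+ * r.ctx = stored_context;
+ * r.event = _MALI_UK_VSYNC_EVENT_BEGIN_WAIT;
+ * _mali_ukk_vsync_event_report( &r );
+ * // ... wait for vsync ...
+ * r.ctx = stored_context;
+ * r.event = _MALI_UK_VSYNC_EVENT_END_WAIT;
+ * _mali_ukk_vsync_event_report( &r );
+ * @endcode
+ */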
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c
new file mode 100644
index 00000000000..eeb29589ddc
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.c
@@ -0,0 +1,921 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm.c
+ * Implementation of the power management module for the kernel device driver
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+#include "mali_platform.h"
+
+/* Internal PMM subsystem state */
+static _mali_pmm_internal_state_t *pmm_state = NULL;
+/* Mali kernel subsystem id */
+static mali_kernel_subsystem_identifier mali_subsystem_pmm_id = -1;
+
+#define GET_PMM_STATE_PTR (pmm_state)
+
+/* Internal functions */
+static _mali_osk_errcode_t malipmm_create(_mali_osk_resource_t *resource);
+static void pmm_event_process( void );
+_mali_osk_errcode_t malipmm_irq_uhandler(void *data);
+void malipmm_irq_bhandler(void *data);
+
+/** @brief Start the PMM subsystem
+ *
+ * @param id Subsystem id to uniquely identify this subsystem
+ * @return _MALI_OSK_ERR_OK if the system started successfully, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t malipmm_kernel_subsystem_start( mali_kernel_subsystem_identifier id );
+
+/** @brief Perform post start up of the PMM subsystem
+ *
+ * Post start up includes initializing the current policy, now that the system is
+ * completely started, so that policies do not turn off hardware during start up
+ *
+ * @param id the unique subsystem id
+ * @return _MALI_OSK_ERR_OK if the post startup was successful, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t malipmm_kernel_load_complete( mali_kernel_subsystem_identifier id );
+
+/** @brief Terminate the PMM subsystem
+ *
+ * @param id the unique subsystem id
+ */
+void malipmm_kernel_subsystem_terminate( mali_kernel_subsystem_identifier id );
+
+#if MALI_STATE_TRACKING
+ void malipmm_subsystem_dump_state( void );
+#endif
+
+
+/* This will be one of the subsystems in the array of subsystems:
+ static struct mali_kernel_subsystem * subsystems[];
+ found in file: mali_kernel_core.c
+*/
+struct mali_kernel_subsystem mali_subsystem_pmm=
+{
+ malipmm_kernel_subsystem_start, /* startup */
+ malipmm_kernel_subsystem_terminate, /* shutdown */
+ malipmm_kernel_load_complete, /* loaded all subsystems */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+#if MALI_STATE_TRACKING
+ malipmm_subsystem_dump_state, /* dump_state */
+#endif
+};
+
+#if PMM_OS_TEST
+
+u32 power_test_event = 0;
+mali_bool power_test_flag = MALI_FALSE;
+_mali_osk_timer_t *power_test_timer = NULL;
+
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data)
+{
+ MALI_PRINT(("POWER TEST OS UP DONE\n"));
+}
+
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data)
+{
+ MALI_PRINT(("POWER TEST OS DOWN DONE\n"));
+}
+
+/**
+ * Symbian OS Power Up call to the driver
+ */
+void power_test_callback( void *arg )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ power_test_flag = MALI_TRUE;
+ _mali_osk_irq_schedulework( pmm->irq );
+}
+
+void power_test_start()
+{
+ power_test_timer = _mali_osk_timer_init();
+ _mali_osk_timer_setcallback( power_test_timer, power_test_callback, NULL );
+
+ /* First event is power down */
+ power_test_event = MALI_PMM_EVENT_OS_POWER_DOWN;
+ _mali_osk_timer_add( power_test_timer, 10000 );
+}
+
+mali_bool power_test_check()
+{
+ if( power_test_flag )
+ {
+ _mali_uk_pmm_message_s event = {
+ NULL, /* ctx */
+ 0, /* id (set below) */
+ 1 }; /* data */
+ event.id = power_test_event;
+
+ power_test_flag = MALI_FALSE;
+
+ /* Send event */
+ _mali_ukk_pmm_event_message( &event );
+
+ /* Switch to next event to test */
+ if( power_test_event == MALI_PMM_EVENT_OS_POWER_DOWN )
+ {
+ power_test_event = MALI_PMM_EVENT_OS_POWER_UP;
+ }
+ else
+ {
+ power_test_event = MALI_PMM_EVENT_OS_POWER_DOWN;
+ }
+ _mali_osk_timer_add( power_test_timer, 5000 );
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+void power_test_end(void)
+{
+ _mali_osk_timer_del( power_test_timer );
+ _mali_osk_timer_term( power_test_timer );
+ power_test_timer = NULL;
+}
+
+#endif
+
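+/* Routing note (a summary of the code below): event ids greater than
+ * MALI_PMM_EVENT_INTERNALS are internal acknowledgements and are queued on
+ * pmm->iqueue; all other ids are "real" OS/job events and are queued on
+ * pmm->queue. Work is scheduled in either case. A typical sender just fills
+ * in a message and calls this function, e.g. (illustrative only, mirroring
+ * malipmm_core_power_down_okay() below):
+ *
+ *   _mali_uk_pmm_message_s event = { NULL, MALI_PMM_EVENT_JOB_QUEUED, 0 };
+ *   event.data = (mali_pmm_message_data)MALI_PMM_CORE_GP;
+ *   _mali_ukk_pmm_event_message( &event );
+ */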
+void _mali_ukk_pmm_event_message( _mali_uk_pmm_message_s *args )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ _mali_osk_notification_t *msg;
+ mali_pmm_message_t *event;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: sending message\n") );
+
+#if MALI_PMM_TRACE && MALI_PMM_TRACE_SENT_EVENTS
+ _mali_pmm_trace_event_message( args, MALI_FALSE );
+#endif
+
+ msg = _mali_osk_notification_create( MALI_PMM_NOTIFICATION_TYPE, sizeof( mali_pmm_message_t ) );
+
+ if( msg )
+ {
+ event = (mali_pmm_message_t *)msg->result_buffer;
+ event->id = args->id;
+ event->ts = _mali_osk_time_tickcount();
+ event->data = args->data;
+
+ _mali_osk_atomic_inc( &(pmm->messages_queued) );
+
+ if( args->id > MALI_PMM_EVENT_INTERNALS )
+ {
+ /* Internal PMM message */
+ _mali_osk_notification_queue_send( pmm->iqueue, msg );
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->imessages_sent++;
+ #endif
+ }
+ else
+ {
+ /* Real event */
+ _mali_osk_notification_queue_send( pmm->queue, msg );
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->messages_sent++;
+ #endif
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR( ("PMM: Could not send message %d", args->id) );
+ /* Make note of this OOM - which has caused a missed event */
+ pmm->missed++;
+ }
+
+ /* Schedule time to look at the event or the fact we couldn't create an event */
+ _mali_osk_irq_schedulework( pmm->irq );
+}
+
+mali_pmm_state _mali_pmm_state( void )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ if( pmm && (mali_subsystem_pmm_id != -1) )
+ {
+ return pmm->state;
+ }
+
+ /* No working subsystem yet */
+ return MALI_PMM_STATE_UNAVAILABLE;
+}
+
+
+mali_pmm_core_mask _mali_pmm_cores_list( void )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return pmm->cores_registered;
+}
+
+mali_pmm_core_mask _mali_pmm_cores_powered( void )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return pmm->cores_powered;
+}
+
+
+_mali_osk_errcode_t _mali_pmm_list_policies(
+ u32 policy_list_size,
+ mali_pmm_policy *policy_list,
+ u32 *policies_available )
+{
+ /* TBD - This is currently a stub function for basic power management */
+
+ MALI_ERROR( _MALI_OSK_ERR_UNSUPPORTED );
+}
+
+_mali_osk_errcode_t _mali_pmm_set_policy( mali_pmm_policy policy )
+{
+ /* TBD - This is currently a stub function for basic power management */
+
+/* TBD - When this is not a stub... include tracing...
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_policy_change( old, newpolicy );
+#endif
+*/
+ MALI_ERROR( _MALI_OSK_ERR_UNSUPPORTED );
+}
+
+_mali_osk_errcode_t _mali_pmm_get_policy( mali_pmm_policy *policy )
+{
+ if( policy )
+ {
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ if( pmm )
+ {
+ *policy = pmm->policy;
+ MALI_SUCCESS;
+ }
+ else
+ {
+ *policy = MALI_PMM_POLICY_NONE;
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+ }
+ }
+
+ /* No return argument */
+ MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+}
+
+#if MALI_PMM_TRACE
+
+/* Event names - order must match mali_pmm_event_id enum */
+static char *pmm_trace_events[] = {
+	"OS_POWER_UP",
+	"OS_POWER_DOWN",
+	"JOB_SCHEDULED",
+	"JOB_QUEUED",
+	"JOB_FINISHED",
+	"TIMEOUT",
+	"DVFS_PAUSE",
+	"DVFS_RESUME",
+};
+
+/* UK event names - order must match mali_pmm_event_id enum */
+static char *pmm_trace_events_uk[] = {
+ "UKS",
+ "UK_EXAMPLE",
+};
+
+/* Internal event names - order must match mali_pmm_event_id enum */
+static char *pmm_trace_events_internal[] = {
+ "INTERNALS",
+ "INTERNAL_POWER_UP_ACK",
+ "INTERNAL_POWER_DOWN_ACK",
+};
+
+/* State names - order must match mali_pmm_state enum */
+static char *pmm_trace_state[] = {
+ "UNAVAILABLE",
+ "SYSTEM ON",
+ "SYSTEM OFF",
+ "SYSTEM TRANSITION",
+};
+
+/* Policy names - order must match mali_pmm_policy enum */
+static char *pmm_trace_policy[] = {
+ "NONE",
+ "ALWAYS ON",
+ "JOB CONTROL",
+};
+
+void _mali_pmm_trace_hardware_change( mali_pmm_core_mask old, mali_pmm_core_mask newstate )
+{
+ const char *dname;
+ const char *cname;
+ const char *ename;
+
+ if( old != newstate )
+ {
+ if( newstate == 0 )
+ {
+ dname = "NO cores";
+ }
+ else
+ {
+ dname = pmm_trace_get_core_name( newstate );
+ }
+
+		/* These state checks only work if the assumption that only cores can be
+		 * turned on or turned off in separate actions is true. If core power states can
+		 * be toggled (some on, some off) at the same time, this check does not work
+		 */
+ if( old > newstate )
+ {
+ /* Cores have turned off */
+ cname = pmm_trace_get_core_name( old - newstate );
+ ename = "OFF";
+ }
+ else
+ {
+ /* Cores have turned on */
+ cname = pmm_trace_get_core_name( newstate - old );
+ ename = "ON";
+ }
+ MALI_PRINT( ("PMM Trace: Hardware %s ON, %s just turned %s. { 0x%08x -> 0x%08x }", dname, cname, ename, old, newstate) );
+ }
+}
+
+void _mali_pmm_trace_state_change( mali_pmm_state old, mali_pmm_state newstate )
+{
+ if( old != newstate )
+ {
+ MALI_PRINT( ("PMM Trace: State changed from %s to %s", pmm_trace_state[old], pmm_trace_state[newstate]) );
+ }
+}
+
+void _mali_pmm_trace_policy_change( mali_pmm_policy old, mali_pmm_policy newpolicy )
+{
+ if( old != newpolicy )
+ {
+ MALI_PRINT( ("PMM Trace: Policy changed from %s to %s", pmm_trace_policy[old], pmm_trace_policy[newpolicy]) );
+ }
+}
+
+void _mali_pmm_trace_event_message( mali_pmm_message_t *event, mali_bool received )
+{
+ const char *ename;
+ const char *dname;
+ const char *tname;
+ const char *format = "PMM Trace: Event %s { (%d) %s, %d ticks, (0x%x) %s }";
+
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ tname = (received) ? "received" : "sent";
+
+ if( event->id >= MALI_PMM_EVENT_INTERNALS )
+ {
+ ename = pmm_trace_events_internal[((int)event->id) - MALI_PMM_EVENT_INTERNALS];
+ }
+ else if( event->id >= MALI_PMM_EVENT_UKS )
+ {
+ ename = pmm_trace_events_uk[((int)event->id) - MALI_PMM_EVENT_UKS];
+ }
+ else
+ {
+ ename = pmm_trace_events[event->id];
+ }
+
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ dname = "os event";
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+ dname = pmm_trace_get_core_name( (mali_pmm_core_mask)event->data );
+ break;
+
+ case MALI_PMM_EVENT_TIMEOUT:
+ dname = "timeout start";
+ /* Print data with a different format */
+ format = "PMM Trace: Event %s { (%d) %s, %d ticks, %d ticks %s }";
+ break;
+ default:
+ dname = "unknown data";
+ }
+
+ MALI_PRINT( (format, tname, (u32)event->id, ename, event->ts, (u32)event->data, dname) );
+}
+
+#endif /* MALI_PMM_TRACE */
+
+
+/****************** Mali Kernel API *****************/
+
+_mali_osk_errcode_t malipmm_kernel_subsystem_start( mali_kernel_subsystem_identifier id )
+{
+ mali_subsystem_pmm_id = id;
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(PMU, malipmm_create));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t malipmm_create(_mali_osk_resource_t *resource)
+{
+ /* Create PMM state memory */
+ MALI_DEBUG_ASSERT( pmm_state == NULL );
+ pmm_state = (_mali_pmm_internal_state_t *) _mali_osk_malloc(sizeof(*pmm_state));
+ MALI_CHECK_NON_NULL( pmm_state, _MALI_OSK_ERR_NOMEM );
+
+ /* All values get 0 as default */
+ _mali_osk_memset(pmm_state, 0, sizeof(*pmm_state));
+
+ /* Set up the initial PMM state */
+ pmm_state->waiting = 0;
+ pmm_state->status = MALI_PMM_STATUS_IDLE;
+ pmm_state->state = MALI_PMM_STATE_UNAVAILABLE; /* Until a core registers */
+
+ /* Set up policy via compile time option for the moment */
+#if MALI_PMM_ALWAYS_ON
+ pmm_state->policy = MALI_PMM_POLICY_ALWAYS_ON;
+#else
+ pmm_state->policy = MALI_PMM_POLICY_JOB_CONTROL;
+#endif
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_policy_change( MALI_PMM_POLICY_NONE, pmm_state->policy );
+#endif
+
+ /* Set up assumes all values are initialized to NULL or MALI_FALSE, so
+ * we can exit halfway through set up and perform clean up
+ */
+#if !MALI_PMM_NO_PMU
+ if( mali_platform_init(resource) != _MALI_OSK_ERR_OK ) goto pmm_fail_cleanup;
+ pmm_state->pmu_initialized = MALI_TRUE;
+#endif
+
+ pmm_state->queue = _mali_osk_notification_queue_init();
+ if( !pmm_state->queue ) goto pmm_fail_cleanup;
+
+ pmm_state->iqueue = _mali_osk_notification_queue_init();
+ if( !pmm_state->iqueue ) goto pmm_fail_cleanup;
+
+ /* We are creating an IRQ handler just for the worker thread it gives us */
+ pmm_state->irq = _mali_osk_irq_init( _MALI_OSK_IRQ_NUMBER_PMM,
+ malipmm_irq_uhandler,
+ malipmm_irq_bhandler,
+ NULL,
+ NULL,
+ (void *)pmm_state, /* PMM state is passed to IRQ */
+ "PMM handler" );
+
+ if( !pmm_state->irq ) goto pmm_fail_cleanup;
+
+ pmm_state->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 75);
+ if( !pmm_state->lock ) goto pmm_fail_cleanup;
+
+ if( _mali_osk_atomic_init( &(pmm_state->messages_queued), 0 ) != _MALI_OSK_ERR_OK )
+ {
+ goto pmm_fail_cleanup;
+ }
+
+ MALIPMM_DEBUG_PRINT( ("PMM: subsystem created, policy=%d\n", pmm_state->policy) );
+
+ MALI_SUCCESS;
+
+pmm_fail_cleanup:
+ MALI_PRINT_ERROR( ("PMM: subsystem failed to be created\n") );
+ if( pmm_state )
+ {
+ _mali_osk_resource_type_t t = PMU;
+ if( pmm_state->lock ) _mali_osk_lock_term( pmm_state->lock );
+ if( pmm_state->irq ) _mali_osk_irq_term( pmm_state->irq );
+ if( pmm_state->queue ) _mali_osk_notification_queue_term( pmm_state->queue );
+ if( pmm_state->iqueue ) _mali_osk_notification_queue_term( pmm_state->iqueue );
+ if( pmm_state->pmu_initialized ) ( mali_platform_deinit(&t) );
+ _mali_osk_free(pmm_state);
+ pmm_state = NULL;
+ }
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+}
+
+_mali_osk_errcode_t malipmm_kernel_load_complete( mali_kernel_subsystem_identifier id )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: subsystem loaded, policy initializing\n") );
+
+#if PMM_OS_TEST
+ power_test_start();
+#endif
+
+ /* Initialize the profile now the system has loaded - so that cores are
+ * not turned off during start up
+ */
+ return pmm_policy_init( pmm );
+}
+
+void malipmm_kernel_subsystem_terminate( mali_kernel_subsystem_identifier id )
+{
+ /* Check this is the right system */
+ MALI_DEBUG_ASSERT( id == mali_subsystem_pmm_id );
+ MALI_DEBUG_ASSERT_POINTER(pmm_state);
+
+ if( pmm_state )
+ {
+ _mali_osk_resource_type_t t = PMU;
+#if PMM_OS_TEST
+ power_test_end();
+#endif
+ /* Get the lock so we can shutdown */
+ MALI_PMM_LOCK(pmm_state);
+#if MALI_STATE_TRACKING
+ pmm_state->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+ pmm_state->status = MALI_PMM_STATUS_OFF;
+#if MALI_STATE_TRACKING
+ pmm_state->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+ MALI_PMM_UNLOCK(pmm_state);
+ pmm_policy_term(pmm_state);
+ _mali_osk_irq_term( pmm_state->irq );
+ _mali_osk_notification_queue_term( pmm_state->queue );
+ _mali_osk_notification_queue_term( pmm_state->iqueue );
+ if( pmm_state->pmu_initialized ) mali_platform_deinit(&t);
+ _mali_osk_atomic_term( &(pmm_state->messages_queued) );
+ MALI_PMM_LOCK_TERM(pmm_state);
+ _mali_osk_free(pmm_state);
+ pmm_state = NULL;
+ }
+
+ MALIPMM_DEBUG_PRINT( ("PMM: subsystem terminated\n") );
+}
+
+_mali_osk_errcode_t malipmm_core_register( mali_pmm_core_id core )
+{
+ _mali_osk_errcode_t err;
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+
+ if( pmm == NULL )
+ {
+ /* PMM state has not been created, this is because the PMU resource has not been
+ * created yet.
+		 * This probably means that the PMU resource has not been specified as the first
+ * resource in the config file
+ */
+ MALI_PRINT_ERROR( ("PMM: Cannot register core %s because the PMU resource has not been\n initialized. Please make sure the PMU resource is the first resource in the\n resource configuration.\n",
+ pmm_trace_get_core_name(core)) );
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ /* Check if the core is registered more than once in PMM */
+ MALI_DEBUG_ASSERT( (pmm->cores_registered & core) == 0 );
+
+ MALIPMM_DEBUG_PRINT( ("PMM: core registered: (0x%x) %s\n", core, pmm_trace_get_core_name(core)) );
+
+#if !MALI_PMM_NO_PMU
+ /* Make sure the core is powered up */
+ err = mali_platform_powerup( core );
+#else
+ err = _MALI_OSK_ERR_OK;
+#endif
+ if( _MALI_OSK_ERR_OK == err )
+ {
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+ /* Assume a registered core is now powered up and idle */
+ pmm->cores_registered |= core;
+ pmm->cores_idle |= core;
+ pmm->cores_powered |= core;
+ pmm_update_system_state( pmm );
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ }
+ else
+ {
+ MALI_PRINT_ERROR( ("PMM: Error(%d) powering up registered core: (0x%x) %s\n",
+ err, core, pmm_trace_get_core_name(core)) );
+ }
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+
+ return err;
+}
+
+void malipmm_core_unregister( mali_pmm_core_id core )
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ /* Check if the core is registered in PMM */
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, core );
+
+ MALIPMM_DEBUG_PRINT( ("PMM: core unregistered: (0x%x) %s\n", core, pmm_trace_get_core_name(core)) );
+
+ {
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+
+#if !MALI_PMM_NO_PMU
+ /* Turn off the core */
+ if( mali_platform_powerdown( core ) != _MALI_OSK_ERR_OK )
+ {
+ MALI_PRINT_ERROR( ("PMM: Error powering down unregistered core: (0x%x) %s\n",
+ core, pmm_trace_get_core_name(core)) );
+ }
+#endif
+
+ /* Remove the core from the system */
+ pmm->cores_registered &= (~core);
+ pmm->cores_idle &= (~core);
+ pmm->cores_powered &= (~core);
+ pmm->cores_pend_down &= (~core);
+ pmm->cores_pend_up &= (~core);
+ pmm->cores_ack_down &= (~core);
+ pmm->cores_ack_up &= (~core);
+
+ pmm_update_system_state( pmm );
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ }
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+}
+
+void malipmm_core_power_down_okay( mali_pmm_core_id core )
+{
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK,
+ 0 };
+
+ event.data = core;
+
+ _mali_ukk_pmm_event_message( &event );
+}
+
+void malipmm_set_policy_check(void)
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ pmm->check_policy = MALI_TRUE;
+
+ /* To check the policy we need to schedule some work */
+ _mali_osk_irq_schedulework( pmm->irq );
+}
+
+_mali_osk_errcode_t malipmm_irq_uhandler(void *data)
+{
+ MALIPMM_DEBUG_PRINT( ("PMM: uhandler - not expected to be used\n") );
+
+ MALI_SUCCESS;
+}
+
+void malipmm_irq_bhandler(void *data)
+{
+ _mali_pmm_internal_state_t *pmm;
+ pmm = (_mali_pmm_internal_state_t *)data;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+#if PMM_OS_TEST
+ if( power_test_check() ) return;
+#endif
+
+ MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ /* Quick out when we are shutting down */
+ if( pmm->status == MALI_PMM_STATUS_OFF )
+ {
+
+ #if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+ #endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ return;
+ }
+
+ MALIPMM_DEBUG_PRINT( ("PMM: bhandler - Processing event\n") );
+
+ if( pmm->missed > 0 )
+ {
+ MALI_PRINT_ERROR( ("PMM: Failed to send %d events", pmm->missed) );
+ pmm_fatal_reset( pmm );
+ }
+
+ if( pmm->check_policy )
+ {
+ pmm->check_policy = MALI_FALSE;
+ pmm_policy_check_policy(pmm);
+ }
+ else
+ {
+ /* Perform event processing */
+ pmm_event_process();
+ if( pmm->fatal_power_err )
+ {
+ /* Try a reset */
+ pmm_fatal_reset( pmm );
+ }
+ }
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+}
+
+static void pmm_event_process( void )
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_osk_notification_t *msg = NULL;
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+ mali_pmm_message_t *event;
+ u32 process_messages;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+
+ /* Max number of messages to process before exiting - as we shouldn't stay
+ * processing the messages for a long time
+ */
+ process_messages = _mali_osk_atomic_read( &(pmm->messages_queued) );
+
+ while( process_messages > 0 )
+ {
+ /* Check internal message queue first */
+ err = _mali_osk_notification_queue_dequeue( pmm->iqueue, &msg );
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ if( pmm->status == MALI_PMM_STATUS_IDLE || pmm->status == MALI_PMM_STATUS_OS_WAITING || pmm->status == MALI_PMM_STATUS_DVFS_PAUSE)
+ {
+ if( pmm->waiting > 0 ) pmm->waiting--;
+
+ /* We aren't busy changing state, so look at real events */
+ err = _mali_osk_notification_queue_dequeue( pmm->queue, &msg );
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ pmm->no_events++;
+ MALIPMM_DEBUG_PRINT( ("PMM: event_process - No message to process\n") );
+ /* Nothing to do - so return */
+ return;
+ }
+ else
+ {
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->messages_received++;
+ #endif
+ }
+ }
+ else
+ {
+ /* Waiting for an internal message */
+ pmm->waiting++;
+ MALIPMM_DEBUG_PRINT( ("PMM: event_process - Waiting for internal message, messages queued=%d\n", pmm->waiting) );
+ return;
+ }
+ }
+ else
+ {
+ #if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ pmm->imessages_received++;
+ #endif
+ }
+
+ MALI_DEBUG_ASSERT_POINTER( msg );
+ /* Check the message type matches */
+ MALI_DEBUG_ASSERT( msg->notification_type == MALI_PMM_NOTIFICATION_TYPE );
+
+ event = msg->result_buffer;
+
+ _mali_osk_atomic_dec( &(pmm->messages_queued) );
+ process_messages--;
+
+ #if MALI_PMM_TRACE
+ /* Trace before we process the event in case we have an error */
+ _mali_pmm_trace_event_message( event, MALI_TRUE );
+ #endif
+ err = pmm_policy_process( pmm, event );
+
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_PRINT_ERROR( ("PMM: Error(%d) in policy %d when processing event message with id: %d",
+ err, pmm->policy, event->id) );
+ }
+
+ /* Delete notification */
+ _mali_osk_notification_delete ( msg );
+
+ if( pmm->fatal_power_err )
+ {
+ /* Nothing good has happened - exit */
+ return;
+ }
+
+
+ #if MALI_PMM_TRACE
+ MALI_PRINT( ("PMM Trace: Event processed, msgs (sent/read) = %d/%d, int msgs (sent/read) = %d/%d, no events = %d, waiting = %d\n",
+ pmm->messages_sent, pmm->messages_received, pmm->imessages_sent, pmm->imessages_received, pmm->no_events, pmm->waiting) );
+ #endif
+ }
+
+ if( pmm->status == MALI_PMM_STATUS_IDLE && pmm->waiting > 0 )
+ {
+ /* For events we ignored whilst we were busy, add a new
+ * scheduled time to look at them */
+ _mali_osk_irq_schedulework( pmm->irq );
+ }
+}
+
+#if MALI_STATE_TRACKING
+void malipmm_subsystem_dump_state(void)
+{
+ malipmm_state_dump();
+}
+#endif
+
+#if (defined(DEBUG) || MALI_STATE_TRACKING)
+void malipmm_state_dump(void)
+{
+ _mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+
+ if( !pmm )
+ {
+ MALI_PRINT(("PMM: Null state\n"));
+ }
+ else
+ {
+ MALI_PRINT(("Locks::\nPMM_LOCK_STATUS=%ld",pmm->mali_pmm_lock_acquired));
+ MALI_PRINT(("PMM state:\nPrevious_status=%d\nstatus=%d\nCurrent_event=%d\npolicy=%d\ncheck_policy=%d\nstate=%d\n", pmm->mali_last_pmm_status,pmm->status, pmm->mali_new_event_status, pmm->policy, pmm->check_policy, pmm->state));
+ MALI_PRINT(("PMM cores:\ncores_registered=%d\ncores_powered=%d\ncores_idle=%d\ncores_pend_down=%d\ncores_pend_up=%d\ncores_ack_down=%d\ncores_ack_up=%d\n", pmm->cores_registered, pmm->cores_powered, pmm->cores_idle, pmm->cores_pend_down, pmm->cores_pend_up, pmm->cores_ack_down, pmm->cores_ack_up));
+ MALI_PRINT(("PMM misc:\npmu_init=%d\nmessages_queued=%d\nwaiting=%d\nno_events=%d\nmissed=%d\nfatal_power_err=%d\n", pmm->pmu_initialized, _mali_osk_atomic_read( &(pmm->messages_queued) ), pmm->waiting, pmm->no_events, pmm->missed, pmm->fatal_power_err));
+ }
+}
+#endif
+
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h
new file mode 100644
index 00000000000..fe7a046cb47
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm.h
+ * Defines the power management module for the kernel device driver
+ */
+
+#ifndef __MALI_PMM_H__
+#define __MALI_PMM_H__
+
+/* For mali_pmm_message_data and MALI_PMM_EVENT_UK_* defines */
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @defgroup pmmapi Power Management Module APIs
+ *
+ * @{
+ */
+
+/** OS event tester */
+#define PMM_OS_TEST 0
+
+/** @brief Compile option to turn on/off tracing */
+#define MALI_PMM_TRACE 0
+#define MALI_PMM_TRACE_SENT_EVENTS 0
+
+/** @brief Compile option to switch between always on or job control PMM policy */
+#define MALI_PMM_ALWAYS_ON 0
+
+/** @brief Overrides hardware PMU and uses software simulation instead
+ * @note This even stops initialization of the PMU and cores being powered on at start up
+ */
+#define MALI_PMM_NO_PMU 0
+
+/** @brief PMM debug print to control debug message level */
+#define MALIPMM_DEBUG_PRINT(args) \
+ MALI_DEBUG_PRINT(3, args)
+
+
+/** @brief power management event message identifiers.
+ */
+/* These must match up with the pmm_trace_events & pmm_trace_events_internal
+ * arrays
+ */
+typedef enum mali_pmm_event_id
+{
+ MALI_PMM_EVENT_OS_POWER_UP = 0, /**< OS power up event */
+ MALI_PMM_EVENT_OS_POWER_DOWN = 1, /**< OS power down event */
+ MALI_PMM_EVENT_JOB_SCHEDULED = 2, /**< Job scheduled to run event */
+ MALI_PMM_EVENT_JOB_QUEUED = 3, /**< Job queued (but not run) event */
+ MALI_PMM_EVENT_JOB_FINISHED = 4, /**< Job finished event */
+ MALI_PMM_EVENT_TIMEOUT = 5, /**< Time out timer has expired */
+ MALI_PMM_EVENT_DVFS_PAUSE = 6, /**< Mali device pause event */
+ MALI_PMM_EVENT_DVFS_RESUME = 7, /**< Mali device resume event */
+
+ MALI_PMM_EVENT_UKS = 200, /**< Events from the user-side start here */
+ MALI_PMM_EVENT_UK_EXAMPLE = _MALI_PMM_EVENT_UK_EXAMPLE,
+
+ MALI_PMM_EVENT_INTERNALS = 1000,
+ MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK = 1001, /**< Internal power up acknowledgement */
+ MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK = 1002, /**< Internal power down acknowledgment */
+} mali_pmm_event_id;
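+
+/* The event id space is deliberately partitioned (a summary of the values
+ * above, as consumed by _mali_ukk_pmm_event_message() and the trace code in
+ * mali_pmm.c):
+ *   0..7    - OS power, job and DVFS events
+ *   200..   - events raised from the user side (MALI_PMM_EVENT_UKS base)
+ *   1000..  - internal power up/down acknowledgements
+ */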
+
+
+/** @brief Use this when the power up/down callbacks do not need any OS data. */
+#define MALI_PMM_NO_OS_DATA 1
+
+
+/** @brief Geometry and pixel processor identifiers for the PMM
+ *
+ * @note these match the ARM Mali 400 PMU hardware definitions, apart from the "SYSTEM"
+ */
+typedef enum mali_pmm_core_id_tag
+{
+ MALI_PMM_CORE_SYSTEM = 0x00000000, /**< All of the Mali hardware */
+ MALI_PMM_CORE_GP = 0x00000001, /**< Mali GP2 */
+ MALI_PMM_CORE_L2 = 0x00000002, /**< Level 2 cache */
+ MALI_PMM_CORE_PP0 = 0x00000004, /**< Mali 200 pixel processor 0 */
+ MALI_PMM_CORE_PP1 = 0x00000008, /**< Mali 200 pixel processor 1 */
+ MALI_PMM_CORE_PP2 = 0x00000010, /**< Mali 200 pixel processor 2 */
+ MALI_PMM_CORE_PP3 = 0x00000020, /**< Mali 200 pixel processor 3 */
+ MALI_PMM_CORE_PP_ALL = 0x0000003C /**< Mali 200 pixel processors 0-3 */
+} mali_pmm_core_id;
+
+/** @brief PMM bitmask of mali_pmm_core_ids
+ */
+typedef u32 mali_pmm_core_mask;
+
+/** @brief PMM event timestamp type
+ */
+typedef u32 mali_pmm_timestamp;
+
+/** @brief power management event message struct
+ */
+typedef struct _mali_pmm_message
+{
+ mali_pmm_event_id id; /**< event id */
+ mali_pmm_message_data data; /**< specific data associated with the event */
+ mali_pmm_timestamp ts; /**< timestamp the event was placed in the event queue */
+} mali_pmm_message_t;
+
+
+
+/** @brief the state of the power management module.
+ */
+/* These must match up with the pmm_trace_state array */
+typedef enum mali_pmm_state_tag
+{
+ MALI_PMM_STATE_UNAVAILABLE = 0, /**< PMM is not available */
+ MALI_PMM_STATE_SYSTEM_ON = 1, /**< All of the Mali hardware is on */
+ MALI_PMM_STATE_SYSTEM_OFF = 2, /**< All of the Mali hardware is off */
+ MALI_PMM_STATE_SYSTEM_TRANSITION = 3 /**< System is changing state */
+} mali_pmm_state;
+
+
+/** @brief a power management policy.
+ */
+/* These must match up with the pmm_trace_policy array */
+typedef enum mali_pmm_policy_tag
+{
+ MALI_PMM_POLICY_NONE = 0, /**< No policy */
+ MALI_PMM_POLICY_ALWAYS_ON = 1, /**< Always on policy */
+ MALI_PMM_POLICY_JOB_CONTROL = 2, /**< Job control policy */
+ MALI_PMM_POLICY_RUNTIME_JOB_CONTROL = 3 /**< Run time power management control policy */
+} mali_pmm_policy;
+
+/** @brief Function to report to the OS when the power down has finished
+ *
+ * @param data The event message data that initiated the power down
+ */
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data);
+
+/** @brief Function to report to the OS when the power up has finished
+ *
+ * @param data The event message data that initiated the power up
+ */
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data);
+
+/** @brief Function to report that the DVFS operation is done
+ *
+ * @param data The event message data
+ */
+void _mali_osk_pmm_dvfs_operation_done(mali_pmm_message_data data);
+
+#if MALI_POWER_MGMT_TEST_SUITE
+/** @brief Function to notify power management events
+ *
+ * @param data The event message data
+ */
+void _mali_osk_pmm_policy_events_notifications(mali_pmm_event_id event_id);
+
+#endif
+
+/** @brief Function to report to the OS that the device is idle
+ *
+ * @note informs the OS that the device is idle
+ */
+_mali_osk_errcode_t _mali_osk_pmm_dev_idle( void );
+
+/** @brief Function to report to the OS that the device should be activated
+ *
+ * @note informs the OS that the device needs to be activated
+ */
+void _mali_osk_pmm_dev_activate( void );
+
+/** @brief Queries the current state of the PMM software
+ *
+ * @note the state of the PMM can change after this call has returned
+ *
+ * @return the current PMM state value
+ */
+mali_pmm_state _mali_pmm_state( void );
+
+/** @brief List of cores that are registered with the PMM
+ *
+ * This will return the cores that have been currently registered with the PMM,
+ * which is a bitwise OR of the mali_pmm_core_id_tags. A value of 0x0 means that
+ * there are no cores registered.
+ *
+ * @note the list of cores can change after this call has returned
+ *
+ * @return a bit mask representing all the cores that have been registered with the PMM
+ */
+mali_pmm_core_mask _mali_pmm_cores_list( void );
+
+/** @brief List of cores that are powered up in the PMM
+ *
+ * This will return the subset of the cores listed by _mali_pmm_cores_list()
+ * that have power. It is a bitwise OR of the mali_pmm_core_id_tags. A value of
+ * 0x0 means that none of the cores registered are powered.
+ *
+ * @note the list of cores can change after this call has returned
+ *
+ * @return a bit mask representing all the cores that are powered up
+ */
+mali_pmm_core_mask _mali_pmm_cores_powered( void );
+
+
+/** @brief List of power management policies that are supported by the PMM
+ *
+ * Given an empty array of policies - policy_list - which contains the number
+ * of entries specified by policy_list_size, this function will populate
+ * the list with the available policies. If the policy_list is too small for
+ * all the policies then only policy_list_size entries will be returned. If the
+ * policy_list is bigger than the number of available policies, then the extra
+ * entries will be set to MALI_PMM_POLICY_NONE.
+ * The function will also update policies_available with the number of policies
+ * that are available, even if it exceeds the policy_list_size.
+ * The function will succeed if all policies could be returned, else it will
+ * fail if none or only a subset of policies could be returned.
+ * The function will also fail if no policy_list is supplied, though
+ * policies_available is optional.
+ *
+ * @note this is a STUB function and is not yet implemented
+ *
+ * @param policy_list_size is the number of policies that can be returned in
+ * the policy_list argument
+ * @param policy_list is an array of policies that should be populated with
+ * the list of policies that are supported by the PMM
+ * @param policies_available optional argument, if non-NULL will be set to the
+ * number of policies available
+ * @return _MALI_OSK_ERR_OK if the policies could be listed, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_list_policies(
+ u32 policy_list_size,
+ mali_pmm_policy *policy_list,
+ u32 *policies_available );
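+
+/* Illustrative call pattern for the above (a sketch - note the function is
+ * currently a stub that returns _MALI_OSK_ERR_UNSUPPORTED):
+ *
+ *   mali_pmm_policy list[4];
+ *   u32 available;
+ *   if( _MALI_OSK_ERR_OK == _mali_pmm_list_policies( 4, list, &available ) )
+ *   {
+ *       entries list[0] .. list[available-1] (capped at 4) hold the policies
+ *   }
+ */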
+
+/** @brief Set the power management policy in the PMM
+ *
+ * Given a valid supported policy, this function will change the PMM to use
+ * this new policy
+ * The function will fail if the policy given is invalid or unsupported.
+ *
+ * @note this is a STUB function and is not yet implemented
+ *
+ * @param policy the new policy to be set
+ * @return _MALI_OSK_ERR_OK if the policy could be set, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_set_policy( mali_pmm_policy policy );
+
+/** @brief Get the current power management policy in the PMM
+ *
+ * Given a pointer to a policy data type, this function will return the current
+ * policy that is in effect for the PMM. This may be out of date if there is a
+ * pending set policy call that has not been serviced.
+ * The function will fail if the policy given is NULL.
+ *
+ * @note the policy of the PMM can change after this call has returned
+ *
+ * @param policy a pointer to a policy that can be updated to the current
+ * policy
+ * @return _MALI_OSK_ERR_OK if the policy could be returned, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_get_policy( mali_pmm_policy *policy );
+
+#if MALI_PMM_TRACE
+
+/** @brief Indicates when a hardware state change occurs in the PMM
+ *
+ * @param old a mask of the cores indicating the previous state of the cores
+ * @param newstate a mask of the cores indicating the new current state of the cores
+ */
+void _mali_pmm_trace_hardware_change( mali_pmm_core_mask old, mali_pmm_core_mask newstate );
+
+/** @brief Indicates when a state change occurs in the PMM
+ *
+ * @param old the previous state for the PMM
+ * @param newstate the new current state of the PMM
+ */
+void _mali_pmm_trace_state_change( mali_pmm_state old, mali_pmm_state newstate );
+
+/** @brief Indicates when a policy change occurs in the PMM
+ *
+ * @param old the previous policy for the PMM
+ * @param newpolicy the new current policy of the PMM
+ */
+void _mali_pmm_trace_policy_change( mali_pmm_policy old, mali_pmm_policy newpolicy );
+
+/** @brief Records when an event message is read by the event system
+ *
+ * @param event the message details
+ * @param received MALI_TRUE when the message is received by the PMM, else it is being sent
+ */
+void _mali_pmm_trace_event_message( mali_pmm_message_t *event, mali_bool received );
+
+#endif /* MALI_PMM_TRACE */
+
+/** @brief Dumps the current state of OS PMM thread
+ */
+#if MALI_STATE_TRACKING
+void mali_pmm_dump_os_thread_state( void );
+#endif /* MALI_STATE_TRACKING */
+
+#if (defined(DEBUG) || MALI_STATE_TRACKING)
+/** @brief Dumps the current state of the PMM
+ */
+void malipmm_state_dump( void );
+#endif
+
+/** @} */ /* end group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c
new file mode 100644
index 00000000000..327e8b4c853
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy.c
+ * Implementation of the common routines for power management module
+ * policies
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+
+#include "mali_pmm_policy_alwayson.h"
+#include "mali_pmm_policy_jobcontrol.h"
+
+/* Call back function for timer expiration */
+static void pmm_policy_timer_callback( void *arg );
+
+_mali_osk_errcode_t pmm_policy_timer_init( _pmm_policy_timer_t *pptimer, u32 timeout, mali_pmm_event_id id )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ /* All values get 0 as default */
+ _mali_osk_memset(pptimer, 0, sizeof(*pptimer));
+
+ pptimer->timer = _mali_osk_timer_init();
+ if( pptimer->timer )
+ {
+ _mali_osk_timer_setcallback( pptimer->timer, pmm_policy_timer_callback, (void *)pptimer );
+ pptimer->timeout = timeout;
+ pptimer->event_id = id;
+ MALI_SUCCESS;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+static void pmm_policy_timer_callback( void *arg )
+{
+ _pmm_policy_timer_t *pptimer = (_pmm_policy_timer_t *)arg;
+
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+ MALI_DEBUG_ASSERT( pptimer->set );
+
+ /* Set timer expired and flag there is a policy to check */
+ pptimer->expired = MALI_TRUE;
+ malipmm_set_policy_check();
+}
+
+
+void pmm_policy_timer_term( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ _mali_osk_timer_del( pptimer->timer );
+ _mali_osk_timer_term( pptimer->timer );
+ pptimer->timer = NULL;
+}
+
+mali_bool pmm_policy_timer_start( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+ MALI_DEBUG_ASSERT_POINTER(pptimer->timer);
+
+ if( !(pptimer->set) )
+ {
+ pptimer->set = MALI_TRUE;
+ pptimer->expired = MALI_FALSE;
+ pptimer->start = _mali_osk_time_tickcount();
+ _mali_osk_timer_add( pptimer->timer, pptimer->timeout );
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+mali_bool pmm_policy_timer_stop( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+ MALI_DEBUG_ASSERT_POINTER(pptimer->timer);
+
+ if( pptimer->set )
+ {
+ _mali_osk_timer_del( pptimer->timer );
+ pptimer->set = MALI_FALSE;
+ pptimer->expired = MALI_FALSE;
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+mali_bool pmm_policy_timer_raise_event( _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ if( pptimer->expired )
+ {
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_TIMEOUT, /* Assume timeout id, but set it below */
+ 0 };
+
+ event.id = pptimer->event_id;
+ event.data = (mali_pmm_message_data)pptimer->start;
+
+ /* Don't need to do any other notification with this timer */
+ pptimer->expired = MALI_FALSE;
+ /* Unset timer so it is free to be set again */
+ pptimer->set = MALI_FALSE;
+
+ _mali_ukk_pmm_event_message( &event );
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+mali_bool pmm_policy_timer_valid( u32 timer_start, u32 other_start )
+{
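+	/* The timer is valid if it was started at or after the other event -
+	 * i.e. other_start must not be after timer_start */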
+ return (_mali_osk_time_after( other_start, timer_start ) == 0);
+}
+
+
+_mali_osk_errcode_t pmm_policy_init(_mali_pmm_internal_state_t *pmm)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_ALWAYS_ON:
+ {
+ err = pmm_policy_init_always_on();
+ }
+ break;
+
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ err = pmm_policy_init_job_control(pmm);
+ }
+ break;
+
+ case MALI_PMM_POLICY_NONE:
+ default:
+ err = _MALI_OSK_ERR_FAULT;
+ }
+
+ return err;
+}
+
+void pmm_policy_term(_mali_pmm_internal_state_t *pmm)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_ALWAYS_ON:
+ {
+ pmm_policy_term_always_on();
+ }
+ break;
+
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ pmm_policy_term_job_control();
+ }
+ break;
+
+ case MALI_PMM_POLICY_NONE:
+ default:
+ MALI_PRINT_ERROR( ("PMM: Invalid policy terminated %d\n", pmm->policy) );
+ }
+}
+
+
+_mali_osk_errcode_t pmm_policy_process(_mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_ALWAYS_ON:
+ {
+ err = pmm_policy_process_always_on( pmm, event );
+ }
+ break;
+
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ err = pmm_policy_process_job_control( pmm, event );
+ }
+ break;
+
+ case MALI_PMM_POLICY_NONE:
+ default:
+ err = _MALI_OSK_ERR_FAULT;
+ }
+
+ return err;
+}
+
+
+void pmm_policy_check_policy( _mali_pmm_internal_state_t *pmm )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ switch( pmm->policy )
+ {
+ case MALI_PMM_POLICY_JOB_CONTROL:
+ {
+ pmm_policy_check_job_control();
+ }
+ break;
+
+ default:
+ /* Nothing needs to be done */
+ break;
+ }
+}
+
+
+#endif /* USING_MALI_PMM */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h
new file mode 100644
index 00000000000..83cb7f29a92
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy.h
+ * Defines the power management module policies
+ */
+
+#ifndef __MALI_PMM_POLICY_H__
+#define __MALI_PMM_POLICY_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief Generic timer for use with policies
+ */
+typedef struct _pmm_policy_timer
+{
+ u32 timeout; /**< Timeout for this timer in ticks */
+ mali_pmm_event_id event_id; /**< Event id that will be raised when timer expires */
+ _mali_osk_timer_t *timer; /**< Timer */
+ mali_bool set; /**< Timer set */
+ mali_bool expired; /**< Timer expired - event needs to be raised */
+ u32 start; /**< Timer start ticks */
+} _pmm_policy_timer_t;
+
+/** @brief Policy timer initialization
+ *
+ * This will create a timer for use in policies, but won't start it
+ *
+ * @param pptimer An empty timer structure to be initialized
+ * @param timeout Timeout in ticks for the timer
+ * @param id Event id that will be raised on timeout
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_timer_init( _pmm_policy_timer_t *pptimer, u32 timeout, mali_pmm_event_id id );
+
+/** @brief Policy timer termination
+ *
+ * This will clean up a timer that was previously used in policies, it
+ * will also stop it if started
+ *
+ * @param pptimer An initialized timer structure to be terminated
+ */
+void pmm_policy_timer_term( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer start
+ *
+ * This will start a previously created timer for use in policies
+ * When the timer expires after the initialized timeout it will raise
+ * a PMM event of the event id given on initialization
+ * As data for the event it will pass the start time of the timer
+ *
+ * @param pptimer A previously initialized policy timer
+ * @return MALI_TRUE if the timer was started, MALI_FALSE if it is already started
+ */
+mali_bool pmm_policy_timer_start( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer stop
+ *
+ * This will stop a previously created timer for use in policies
+ *
+ * @param pptimer A previously started policy timer
+ * @return MALI_TRUE if the timer was stopped, MALI_FALSE if it is already stopped
+ */
+mali_bool pmm_policy_timer_stop( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer event raising
+ *
+ * This raises an event for an expired timer
+ *
+ * @param pptimer An expired policy timer
+ * @return MALI_TRUE if an event was raised, else MALI_FALSE
+ */
+mali_bool pmm_policy_timer_raise_event( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer valid checker
+ *
+ * This will check that a timer was started after a given time
+ *
+ * @param timer_start Time the timer was started
+ * @param other_start Time when another event or action occurred
+ * @return MALI_TRUE if the timer was started after the other time, else MALI_FALSE
+ */
+mali_bool pmm_policy_timer_valid( u32 timer_start, u32 other_start );
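+
+/* Typical policy timer lifecycle (a sketch, mirroring the job control policy
+ * in mali_pmm_policy_jobcontrol.c):
+ *
+ *   _pmm_policy_timer_t t;
+ *   pmm_policy_timer_init( &t, timeout_in_ticks, MALI_PMM_EVENT_TIMEOUT );
+ *   pmm_policy_timer_start( &t );         start the one-shot timer
+ *   ...                                   callback flags expiry and asks the
+ *                                         PMM to run a policy check
+ *   pmm_policy_timer_raise_event( &t );   turn the expiry into a PMM event
+ *   pmm_policy_timer_term( &t );          stop and clean up
+ */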
+
+
+/** @brief Common policy initialization
+ *
+ * This will initialize the current policy
+ *
+ * @note Any previously initialized policy should be terminated first
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Common policy termination
+ *
+ * This will terminate the current policy.
+ * @note This can be called when a policy has not been initialized
+ */
+void pmm_policy_term( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Common policy state changer
+ *
+ * Given the next available event message, this routine passes it to
+ * the current policy for processing
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+
+/** @brief Common policy checker
+ *
+ * If a policy timer fires then this function will be called to
+ * allow the policy to take the correct action
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_policy_check_policy( _mali_pmm_internal_state_t *pmm );
+
+/** @} */ /* End group pmmapi_policy */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c
new file mode 100644
index 00000000000..643bb04553b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_alwayson.c
+ * Implementation of the power management module policy - always on
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+#include "mali_pmm_policy_alwayson.h"
+
+_mali_osk_errcode_t pmm_policy_init_always_on(void)
+{
+ /* Nothing to set up */
+ MALI_SUCCESS;
+}
+
+void pmm_policy_term_always_on(void)
+{
+ /* Nothing to tear down */
+}
+
+_mali_osk_errcode_t pmm_policy_process_always_on( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ /* We aren't going to do anything, but signal so we don't block the OS
+ * NOTE: This may adversely affect any jobs Mali is currently running
+ */
+ _mali_osk_pmm_power_down_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+ /* Not expected in this policy */
+ MALI_DEBUG_ASSERT( MALI_FALSE );
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ /* Nothing to do */
+ _mali_osk_pmm_power_up_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ /* Nothing to do - we are always on */
+ break;
+
+ case MALI_PMM_EVENT_TIMEOUT:
+ /* Not expected in this policy */
+ MALI_DEBUG_ASSERT( MALI_FALSE );
+ break;
+
+ default:
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+
+ MALI_SUCCESS;
+}
+
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h
new file mode 100644
index 00000000000..a158b09f610
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_alwayson.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_alwayson.h
+ * Defines the power management module policy for always on
+ */
+
+#ifndef __MALI_PMM_POLICY_ALWAYSON_H__
+#define __MALI_PMM_POLICY_ALWAYSON_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief Always on policy initialization
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init_always_on(void);
+
+/** @brief Always on policy termination
+ */
+void pmm_policy_term_always_on(void);
+
+/** @brief Always on policy state changer
+ *
+ * Given the next available event message, this routine processes it
+ * for the policy and changes state as needed.
+ *
+ * Always on policy will ignore all events and keep the Mali cores on
+ * all the time
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process_always_on( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @} */ /* End group pmmapi_policy */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_ALWAYSON_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c
new file mode 100644
index 00000000000..8450bd722f4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_jobcontrol.c
+ * Implementation of the power management module policy - job control
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+#include "mali_pmm_policy_jobcontrol.h"
+
+typedef struct _pmm_policy_data_job_control
+{
+ _pmm_policy_timer_t latency; /**< Latency timeout timer for all cores */
+ u32 core_active_start; /**< Last time a core was set to active */
+ u32 timeout; /**< Timeout in ticks for latency timer */
+} _pmm_policy_data_job_control_t;
+
+
+/** @brief Local data for this policy
+ */
+static _pmm_policy_data_job_control_t *data_job_control = NULL;
+
+/* @brief Set up the timeout if it hasn't already been set and if there are active cores */
+static void job_control_timeout_setup( _mali_pmm_internal_state_t *pmm, _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ /* Do we have an inactivity time out and some powered cores? */
+ if( pptimer->timeout > 0 && pmm->cores_powered != 0 )
+ {
+ /* Is the system idle and all the powered cores are idle? */
+ if( pmm->status == MALI_PMM_STATUS_IDLE && pmm->cores_idle == pmm->cores_powered )
+ {
+ if( pmm_policy_timer_start(pptimer) )
+ {
+			MALIPMM_DEBUG_PRINT( ("PMM policy - Job control: Setting inactivity latency timer\n") );
+ }
+ }
+ else
+ {
+ /* We are not idle so there is no need for an inactivity timer
+ */
+ if( pmm_policy_timer_stop(pptimer) )
+ {
+			MALIPMM_DEBUG_PRINT( ("PMM policy - Job control: Removing inactivity latency timer\n") );
+ }
+ }
+ }
+}
+
+/* @brief Check the validity of the timeout - and if there is one set */
+static mali_bool job_control_timeout_valid( _mali_pmm_internal_state_t *pmm, _pmm_policy_timer_t *pptimer, u32 timer_start )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ /* Not a valid timer! */
+ if( pptimer->timeout == 0 ) return MALI_FALSE;
+
+ /* Are some cores powered and are they all idle? */
+ if( (pmm->cores_powered != 0) && (pmm->cores_idle == pmm->cores_powered) )
+ {
+ /* Has latency timeout started after the last core was active? */
+ if( pmm_policy_timer_valid( timer_start, data_job_control->core_active_start ) )
+ {
+ return MALI_TRUE;
+ }
+ else
+ {
+			MALIPMM_DEBUG_PRINT( ("PMM: Inactivity latency timeout ignored - out of date\n") );
+ }
+ }
+ else
+ {
+ if( pmm->cores_powered == 0 )
+ {
+			MALIPMM_DEBUG_PRINT( ("PMM: Inactivity latency timeout ignored - cores already off\n") );
+ }
+ else
+ {
+			MALIPMM_DEBUG_PRINT( ("PMM: Inactivity latency timeout ignored - cores active\n") );
+ }
+ }
+
+ return MALI_FALSE;
+}
+
+_mali_osk_errcode_t pmm_policy_init_job_control( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err;
+ MALI_DEBUG_ASSERT_POINTER( pmm );
+ MALI_DEBUG_ASSERT( data_job_control == NULL );
+
+ data_job_control = (_pmm_policy_data_job_control_t *) _mali_osk_malloc(sizeof(*data_job_control));
+ MALI_CHECK_NON_NULL( data_job_control, _MALI_OSK_ERR_NOMEM );
+
+ data_job_control->core_active_start = _mali_osk_time_tickcount();
+ data_job_control->timeout = MALI_PMM_POLICY_JOBCONTROL_INACTIVITY_TIMEOUT;
+
+ err = pmm_policy_timer_init( &data_job_control->latency, data_job_control->timeout, MALI_PMM_EVENT_TIMEOUT );
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ _mali_osk_free( data_job_control );
+ data_job_control = NULL;
+ return err;
+ }
+
+ /* Start the latency timeout */
+ job_control_timeout_setup( pmm, &data_job_control->latency );
+
+ MALI_SUCCESS;
+}
+
+void pmm_policy_term_job_control(void)
+{
+ if( data_job_control != NULL )
+ {
+ pmm_policy_timer_term( &data_job_control->latency );
+ _mali_osk_free( data_job_control );
+ data_job_control = NULL;
+ }
+}
+
+static void pmm_policy_job_control_job_queued( _mali_pmm_internal_state_t *pmm )
+{
+ mali_pmm_core_mask cores;
+ mali_pmm_core_mask cores_subset;
+
+ /* Make sure that all cores are powered in this
+ * simple policy
+ */
+ cores = pmm->cores_registered;
+ cores_subset = pmm_cores_to_power_up( pmm, cores );
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering up */
+ if( !pmm_invoke_power_up( pmm ) )
+ {
+ /* Need to wait until finished */
+ pmm->status = MALI_PMM_STATUS_POLICY_POWER_UP;
+ }
+ }
+}
+
+_mali_osk_errcode_t pmm_policy_process_job_control( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+ mali_pmm_core_mask cores;
+ mali_pmm_core_mask cores_subset;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+ MALI_DEBUG_ASSERT_POINTER(data_job_control);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: Job control policy process start - status=%d\n", pmm->status) );
+
+ /* Mainly the data is the cores */
+ cores = pmm_cores_from_event_data( pmm, event );
+
+#if MALI_STATE_TRACKING
+ pmm->mali_last_pmm_status = pmm->status;
+#endif /* MALI_STATE_TRACKING */
+
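+	/* State machine sketch (derived from the cases below):
+	 *   IDLE           -> OS_POWER_DOWN / POLICY_POWER_UP / DVFS_PAUSE / OS_WAITING
+	 *   DVFS_PAUSE     -> IDLE on resume, or OS_WAITING on an OS power down
+	 *   *_POWER_UP     -> IDLE once every pending core acks its power up
+	 *   *_POWER_DOWN   -> OS_WAITING (or DVFS_PAUSE when DVFS is active) on acks
+	 *   OS_WAITING     -> OS_POWER_UP when the OS powers the system back up
+	 */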
+ switch( pmm->status )
+ {
+ /**************** IDLE ****************/
+ case MALI_PMM_STATUS_IDLE:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ /* Not expected in this state */
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+
+			/* Update idle cores to indicate active - remove these cores from the idle mask */
+ pmm_cores_set_active( pmm, cores );
+ /* Remember when this happened */
+ data_job_control->core_active_start = event->ts;
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_SCHEDULED);
+#endif
+
+ /*** FALL THROUGH to QUEUED to check POWER UP ***/
+
+ case MALI_PMM_EVENT_JOB_QUEUED:
+
+ pmm_policy_job_control_job_queued( pmm );
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_QUEUED);
+#endif
+ break;
+
+ case MALI_PMM_EVENT_DVFS_PAUSE:
+
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_FALSE );
+ if ( cores_subset != 0 )
+ {
+ if ( !pmm_power_down_okay( pmm ) )
+ {
+ pmm->is_dvfs_active = 1;
+ pmm->status = MALI_PMM_STATUS_OS_POWER_DOWN;
+ pmm_save_os_event_data( pmm, event->data );
+ break;
+ }
+ }
+ pmm->status = MALI_PMM_STATUS_DVFS_PAUSE;
+ _mali_osk_pmm_dvfs_operation_done(0);
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+
+ /* Need to power down all cores even if we need to wait for them */
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_FALSE );
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering down */
+ if( !pmm_invoke_power_down( pmm ) )
+ {
+ /* We need to wait until they are idle */
+
+ pmm->status = MALI_PMM_STATUS_OS_POWER_DOWN;
+ /* Save the OS data to respond later */
+ pmm_save_os_event_data( pmm, event->data );
+ /* Exit this case - as we have to wait */
+ break;
+ }
+ }
+ /* Set waiting status */
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ /* All cores now down - respond to OS power event */
+ _mali_osk_pmm_power_down_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_JOB_FINISHED:
+
+			/* Update idle cores - add these cores back to the idle mask */
+ pmm_cores_set_idle( pmm, cores );
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_FINISHED);
+#endif
+ if( data_job_control->timeout > 0 )
+ {
+ /* Wait for time out to fire */
+ break;
+ }
+ /* For job control policy - turn off all cores */
+ cores = pmm->cores_powered;
+
+ /*** FALL THROUGH to TIMEOUT TEST as NO TIMEOUT ***/
+
+ case MALI_PMM_EVENT_TIMEOUT:
+
+ /* Main job control policy - turn off cores after inactivity */
+ if( job_control_timeout_valid( pmm, &data_job_control->latency, (u32)event->data ) )
+ {
+ /* Valid inactivity timeout - so find out if we can power down
+ * immediately; if we can't, then the cores are in fact still
+ * active
+ */
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_TRUE );
+ if( cores_subset != 0 )
+ {
+ /* Check if we can really power down; if not, then we are not
+ * really inactive
+ */
+ if( !pmm_invoke_power_down( pmm ) )
+ {
+ pmm_power_down_cancel( pmm );
+ }
+ }
+ /* else there are no cores powered up! */
+ }
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_TIMEOUT);
+#endif
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ /******************DVFS PAUSE**************/
+ case MALI_PMM_STATUS_DVFS_PAUSE:
+ switch ( event->id )
+ {
+ case MALI_PMM_EVENT_DVFS_RESUME:
+
+ if ( pmm->cores_powered != 0 )
+ {
+ pmm->cores_ack_down = 0;
+ pmm_power_down_cancel( pmm );
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ }
+ else
+ {
+ pmm_policy_job_control_job_queued( pmm );
+ }
+ _mali_osk_pmm_dvfs_operation_done( 0 );
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ /* Set waiting status */
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ if ( pmm->cores_powered != 0 )
+ {
+ if ( pmm_invoke_power_down( pmm ) )
+ {
+ _mali_osk_pmm_power_down_done( 0 );
+ break;
+ }
+ }
+ _mali_osk_pmm_power_down_done( 0 );
+ break;
+ default:
+ break;
+ }
+ break;
+
+ /**************** POWER UP ****************/
+ case MALI_PMM_STATUS_OS_POWER_UP:
+ case MALI_PMM_STATUS_POLICY_POWER_UP:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ /* Make sure the acknowledged cores match those pending power up */
+ MALI_DEBUG_ASSERT( cores == pmm->cores_pend_up );
+ pmm_cores_set_up_ack( pmm, cores );
+
+ if( pmm_invoke_power_up( pmm ) )
+ {
+ if( pmm->status == MALI_PMM_STATUS_OS_POWER_UP )
+ {
+ /* Get the OS data and respond to the power up */
+ _mali_osk_pmm_power_up_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ }
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ /**************** POWER DOWN ****************/
+ case MALI_PMM_STATUS_OS_POWER_DOWN:
+ case MALI_PMM_STATUS_POLICY_POWER_DOWN:
+ switch( event->id )
+ {
+
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+
+ pmm_cores_set_down_ack( pmm, cores );
+
+ if ( pmm->is_dvfs_active == 1 )
+ {
+ if( pmm_power_down_okay( pmm ) )
+ {
+ pmm->is_dvfs_active = 0;
+ pmm->status = MALI_PMM_STATUS_DVFS_PAUSE;
+ _mali_osk_pmm_dvfs_operation_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ break;
+ }
+
+ /* Now check if we can power down */
+ if( pmm_invoke_power_down( pmm ) )
+ {
+ if( pmm->status == MALI_PMM_STATUS_OS_POWER_DOWN )
+ {
+ /* Get the OS data and respond to the power down */
+ _mali_osk_pmm_power_down_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ }
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ case MALI_PMM_STATUS_OS_WAITING:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ cores_subset = pmm_cores_to_power_up( pmm, cores );
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering up */
+ if( !pmm_invoke_power_up( pmm ) )
+ {
+ /* Need to wait until power up complete */
+ pmm->status = MALI_PMM_STATUS_OS_POWER_UP;
+ /* Save the OS data to respond later */
+ pmm_save_os_event_data( pmm, event->data );
+ /* Exit this case - as we have to wait */
+ break;
+ }
+ }
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ /* All cores now up - respond to OS power up event */
+ _mali_osk_pmm_power_up_done( event->data );
+ break;
+
+ default:
+ /* All other messages are ignored in this state */
+ break;
+ }
+ break;
+
+ default:
+ /* Unexpected state */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Set in-activity latency timer - if required */
+ job_control_timeout_setup( pmm, &data_job_control->latency );
+
+ /* Update the PMM state */
+ pmm_update_system_state( pmm );
+#if MALI_STATE_TRACKING
+ pmm->mali_new_event_status = event->id;
+#endif /* MALI_STATE_TRACKING */
+
+ MALIPMM_DEBUG_PRINT( ("PMM: Job control policy process end - status=%d and event=%d\n", pmm->status,event->id) );
+
+ MALI_SUCCESS;
+}
+
+void pmm_policy_check_job_control(void)
+{
+ MALI_DEBUG_ASSERT_POINTER(data_job_control);
+
+ /* The latency timer must have expired - raise the event */
+ pmm_policy_timer_raise_event(&data_job_control->latency);
+}
+
+
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h
new file mode 100644
index 00000000000..455234b80bc
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_policy_jobcontrol.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_jobcontrol.h
+ * Defines the power management module job control policy
+ */
+
+#ifndef __MALI_PMM_POLICY_JOBCONTROL_H__
+#define __MALI_PMM_POLICY_JOBCONTROL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief The jobcontrol policy inactivity latency timeout (in ticks)
+ * before the hardware is switched off
+ *
+ * @note Setting this low whilst tracing or producing debug output can
+ * cause a lot of timeouts to fire, which can affect the PMM behaviour
+ */
+#define MALI_PMM_POLICY_JOBCONTROL_INACTIVITY_TIMEOUT 50
+
+/** @brief Job control policy initialization
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init_job_control(_mali_pmm_internal_state_t *pmm);
+
+/** @brief Job control policy termination
+ */
+void pmm_policy_term_job_control(void);
+
+/** @brief Job control policy state changer
+ *
+ * Given the next available event message, this routine processes it
+ * for the policy and changes state as needed.
+ *
+ * Job control policy depends on events from the Mali cores, and will
+ * power down all cores after an inactivity latency timeout. It will
+ * power the cores back on again when a job is scheduled to run.
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process_job_control( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @brief Job control policy checker
+ *
+ * The latency timer has fired and we need to raise the correct event to
+ * handle it
+ */
+void pmm_policy_check_job_control(void);
+
+/** @} */ /* End group pmmapi_policy */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_JOBCONTROL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c
new file mode 100644
index 00000000000..a0ac1c7790c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_state.c
+ * Implementation of the power management module internal state
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_system.h"
+
+#include "mali_kernel_core.h"
+#include "mali_platform.h"
+
+#define SIZEOF_CORES_LIST 6
+
+/* NOTE: L2 *MUST* be first on the list so that it
+ * is correctly powered on first and powered off last
+ */
+static mali_pmm_core_id cores_list[] = { MALI_PMM_CORE_L2,
+ MALI_PMM_CORE_GP,
+ MALI_PMM_CORE_PP0,
+ MALI_PMM_CORE_PP1,
+ MALI_PMM_CORE_PP2,
+ MALI_PMM_CORE_PP3 };
+
+
+
+void pmm_update_system_state( _mali_pmm_internal_state_t *pmm )
+{
+ mali_pmm_state state;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ if( pmm->cores_registered == 0 )
+ {
+ state = MALI_PMM_STATE_UNAVAILABLE;
+ }
+ else if( pmm->cores_powered == 0 )
+ {
+ state = MALI_PMM_STATE_SYSTEM_OFF;
+ }
+ else if( pmm->cores_powered == pmm->cores_registered )
+ {
+ state = MALI_PMM_STATE_SYSTEM_ON;
+ }
+ else
+ {
+ /* Some other state where not everything is on or off */
+ state = MALI_PMM_STATE_SYSTEM_TRANSITION;
+ }
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_state_change( pmm->state, state );
+#endif
+ pmm->state = state;
+}
+
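+/* Worked example (illustrative, not part of the driver): with
+ * cores_registered = (MALI_PMM_CORE_GP | MALI_PMM_CORE_L2 | MALI_PMM_CORE_PP0)
+ * the derived state is:
+ * cores_powered == 0 -> MALI_PMM_STATE_SYSTEM_OFF
+ * cores_powered == all three -> MALI_PMM_STATE_SYSTEM_ON
+ * cores_powered == L2 only -> MALI_PMM_STATE_SYSTEM_TRANSITION
+ * and cores_registered == 0 always yields MALI_PMM_STATE_UNAVAILABLE.
+ */
+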
+mali_pmm_core_mask pmm_cores_from_event_data( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+ mali_pmm_core_mask cores;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ /* All cores - the system */
+ cores = pmm->cores_registered;
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+ /* For these messages the event data currently
+ * carries only the core mask
+ */
+ cores = (mali_pmm_core_mask)event->data;
+ if( cores == MALI_PMM_CORE_SYSTEM )
+ {
+ cores = pmm->cores_registered;
+ }
+ else if( cores == MALI_PMM_CORE_PP_ALL )
+ {
+ /* Get the subset of registered PP cores */
+ cores = (pmm->cores_registered & MALI_PMM_CORE_PP_ALL);
+ }
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+ break;
+
+ default:
+ /* Assume timeout messages - report cores still powered */
+ cores = pmm->cores_powered;
+ break;
+ }
+
+ return cores;
+}
+
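+/* Summary of the mapping above (for reference):
+ * - OS power up/down events always act on every registered core;
+ * - job and power-ack events carry a core mask in event->data, with the
+ * MALI_PMM_CORE_SYSTEM and MALI_PMM_CORE_PP_ALL aliases expanded
+ * against cores_registered;
+ * - any other event (e.g. a timeout) reports the cores still powered.
+ */
+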
+mali_pmm_core_mask pmm_cores_to_power_up( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ mali_pmm_core_mask cores_subset;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check that cores aren't pending power down when asked for power up */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_down == 0 );
+
+ cores_subset = (~(pmm->cores_powered) & cores);
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering up */
+ pmm->cores_pend_up = cores_subset;
+ }
+
+ return cores_subset;
+}
+
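+/* Illustrative mask arithmetic (core values made up): if
+ * cores_powered = (L2 | GP) and cores = (L2 | GP | PP0), then
+ * cores_subset = ~(L2 | GP) & (L2 | GP | PP0) = PP0,
+ * so only PP0 becomes pending power up.
+ */
+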
+mali_pmm_core_mask pmm_cores_to_power_down( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores, mali_bool immediate_only )
+{
+ mali_pmm_core_mask cores_subset;
+ _mali_osk_errcode_t err;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check that cores aren't pending power up when asked for power down */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_up == 0 );
+
+ cores_subset = (pmm->cores_powered & cores);
+ if( cores_subset != 0 )
+ {
+ int n;
+ volatile mali_pmm_core_mask *ppowered = &(pmm->cores_powered);
+
+ /* There are some cores that need powering down, but we may
+ * need to wait until they are idle
+ */
+ for( n = SIZEOF_CORES_LIST-1; n >= 0; n-- )
+ {
+ if( (cores_list[n] & cores_subset) != 0 )
+ {
+ /* Core is to be powered down */
+ pmm->cores_pend_down |= cores_list[n];
+
+ /* We can't hold the power lock while accessing the subsystem
+ * mutex via the core power call: driver termination takes the
+ * subsystem mutex and then the power lock to unregister a core.
+ * This does mean that the following function could fail
+ * as the core is unregistered before we tell it to power
+ * down, but that does not matter as we are terminating
+ */
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ /* Signal the core to power down
+ * If it is busy (not idle) it will set a pending power down flag
+ * (as long as we don't want to only immediately power down).
+ * If it isn't busy it will move out of the idle queue right
+ * away
+ */
+ err = mali_core_signal_power_down( cores_list[n], immediate_only );
+ MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ /* Re-read cores_subset in case it has changed */
+ cores_subset = (*ppowered & cores);
+
+ if( err == _MALI_OSK_ERR_OK )
+ {
+ /* We moved an idle core to the power down queue
+ * which means it is now acknowledged (if it is still
+ * registered)
+ */
+ pmm->cores_ack_down |= (cores_list[n] & cores_subset);
+ }
+ else
+ {
+ MALI_DEBUG_ASSERT( err == _MALI_OSK_ERR_BUSY ||
+ (err == _MALI_OSK_ERR_FAULT &&
+ (*ppowered & cores_list[n]) == 0) );
+ /* If we didn't move a core - it must be active, so
+ * leave it pending, so we get an acknowledgement (when
+ * not in immediate only mode)
+ * Alternatively we are shutting down and the core has
+ * been unregistered
+ */
+ }
+ }
+ }
+ }
+
+ return cores_subset;
+}
+
+void pmm_power_down_cancel( _mali_pmm_internal_state_t *pmm )
+{
+ int n;
+ mali_pmm_core_mask pd, ad;
+ _mali_osk_errcode_t err;
+ volatile mali_pmm_core_mask *pregistered;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: Cancelling power down\n") );
+
+ pd = pmm->cores_pend_down;
+ ad = pmm->cores_ack_down;
+ /* Clear the pending cores so that they don't move to the off
+ * queue if they haven't already
+ */
+ pmm->cores_pend_down = 0;
+ pmm->cores_ack_down = 0;
+ pregistered = &(pmm->cores_registered);
+
+ /* Power up all the pending power down cores - just so
+ * we make sure the system is in a known state, as a
+ * pending core might have sent an acknowledge message
+ * which hasn't been read yet.
+ */
+ for( n = 0; n < SIZEOF_CORES_LIST; n++ )
+ {
+ if( (cores_list[n] & pd) != 0 )
+ {
+ /* We can't hold the power lock while accessing the subsystem
+ * mutex via the core power call: driver termination takes the
+ * subsystem mutex and then the power lock to unregister a core.
+ * This does mean that the following power up function could fail
+ * as the core is unregistered before we tell it to power
+ * up, but that does not matter as we are terminating
+ */
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ /* As we are cancelling - only move the cores back to the queue -
+ * no reset needed
+ */
+ err = mali_core_signal_power_up( cores_list[n], MALI_TRUE );
+ MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ /* Update pending list with the current registered cores */
+ pd &= (*pregistered);
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_BUSY &&
+ ((cores_list[n] & ad) == 0)) ||
+ (err == _MALI_OSK_ERR_FAULT &&
+ (*pregistered & cores_list[n]) == 0) );
+ /* If we didn't power up a core - it must be active and
+ * hasn't actually tried to power down - this is expected
+ * for cores that haven't acknowledged
+ * Alternatively we are shutting down and the core has
+ * been unregistered
+ */
+ }
+ }
+ }
+ /* Only used in debug builds */
+ MALI_IGNORE(ad);
+}
+
+
+mali_bool pmm_power_down_okay( _mali_pmm_internal_state_t *pmm )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return ( pmm->cores_pend_down == pmm->cores_ack_down ? MALI_TRUE : MALI_FALSE );
+}
+
+mali_bool pmm_invoke_power_down( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ /* Check that cores are pending power down during power down invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_down != 0 );
+ /* Check that cores are not pending power up during power down invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_up == 0 );
+
+ if( !pmm_power_down_okay( pmm ) )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: Waiting for cores to go idle for power off - 0x%08x / 0x%08x\n",
+ pmm->cores_pend_down, pmm->cores_ack_down) );
+ return MALI_FALSE;
+ }
+ else
+ {
+#if !MALI_PMM_NO_PMU
+ err = mali_platform_powerdown( pmm->cores_pend_down );
+#else
+ err = _MALI_OSK_ERR_OK;
+#endif
+
+ if( err == _MALI_OSK_ERR_OK )
+ {
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+ /* Remove powered down cores from idle and powered list */
+ pmm->cores_powered &= ~(pmm->cores_pend_down);
+ pmm->cores_idle &= ~(pmm->cores_pend_down);
+ /* Reset pending/acknowledged status */
+ pmm->cores_pend_down = 0;
+ pmm->cores_ack_down = 0;
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ }
+ else
+ {
+ MALI_PRINT_ERROR( ("PMM: Failed to get PMU to power down cores - (0x%x) %s",
+ pmm->cores_pend_down, pmm_trace_get_core_name(pmm->cores_pend_down)) );
+ pmm->fatal_power_err = MALI_TRUE;
+ }
+ }
+
+ return MALI_TRUE;
+}
+
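+/* Handshake sketch (mirrors how the job control policy drives this code;
+ * "acked_cores" is illustrative shorthand for the mask carried in the
+ * acknowledge event):
+ *
+ * cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_FALSE );
+ * if( cores_subset != 0 && !pmm_invoke_power_down( pmm ) )
+ * {
+ * // Some cores are still busy - wait for each
+ * // MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK, then:
+ * pmm_cores_set_down_ack( pmm, acked_cores );
+ * pmm_invoke_power_down( pmm ); // powers off once pend == ack
+ * }
+ */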
+
+mali_bool pmm_power_up_okay( _mali_pmm_internal_state_t *pmm )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ return ( pmm->cores_pend_up == pmm->cores_ack_up ? MALI_TRUE : MALI_FALSE );
+}
+
+
+mali_bool pmm_invoke_power_up( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+
+ /* Check that cores are pending power up during power up invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_up != 0 );
+ /* Check that cores are not pending power down during power up invoke */
+ MALI_DEBUG_ASSERT( pmm->cores_pend_down == 0 );
+
+ if( pmm_power_up_okay( pmm ) )
+ {
+ /* Power up has completed - sort out subsystem core status */
+
+ int n;
+ /* Use volatile to access, so that it is updated if any cores are unregistered */
+ volatile mali_pmm_core_mask *ppendup = &(pmm->cores_pend_up);
+#if MALI_PMM_TRACE
+ mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+ /* Move cores into idle queues */
+ for( n = 0; n < SIZEOF_CORES_LIST; n++ )
+ {
+ if( (cores_list[n] & (*ppendup)) != 0 )
+ {
+ /* We can't hold the power lock while accessing the subsystem
+ * mutex via the core power call: driver termination takes the
+ * subsystem mutex and then the power lock to unregister a core.
+ * This does mean that the following function could fail
+ * as the core is unregistered before we tell it to power
+ * up, but that does not matter as we are terminating
+ */
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ err = mali_core_signal_power_up( cores_list[n], MALI_FALSE );
+ MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_FAULT &&
+ (*ppendup & cores_list[n]) == 0) );
+ /* We only expect this to fail when we are shutting down
+ * and the core has been unregistered
+ */
+ }
+ }
+ }
+ /* Finished power up - add cores to idle and powered list */
+ pmm->cores_powered |= (*ppendup);
+ pmm->cores_idle |= (*ppendup);
+ /* Reset pending/acknowledge status */
+ pmm->cores_pend_up = 0;
+ pmm->cores_ack_up = 0;
+
+#if MALI_PMM_TRACE
+ _mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+ return MALI_TRUE;
+ }
+ else
+ {
+#if !MALI_PMM_NO_PMU
+ /* Power up must now be done */
+ err = mali_platform_powerup( pmm->cores_pend_up );
+#else
+ err = _MALI_OSK_ERR_OK;
+#endif
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ MALI_PRINT_ERROR( ("PMM: Failed to get PMU to power up cores - (0x%x) %s",
+ pmm->cores_pend_up, pmm_trace_get_core_name(pmm->cores_pend_up)) );
+ pmm->fatal_power_err = MALI_TRUE;
+ }
+ else
+ {
+ /* TBD - Update core status immediately rather than use event message */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK,
+ 0 };
+ /* All the cores that were pending power up, have now completed power up */
+ event.data = pmm->cores_pend_up;
+ _mali_ukk_pmm_event_message( &event );
+ MALIPMM_DEBUG_PRINT( ("PMM: Sending ACK to power up") );
+ }
+ }
+
+ /* Always return false, as we need an interrupt to acknowledge
+ * when power up is complete
+ */
+ return MALI_FALSE;
+}
+
+mali_pmm_core_mask pmm_cores_set_active( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ pmm->cores_idle &= (~cores);
+ return pmm->cores_idle;
+}
+
+mali_pmm_core_mask pmm_cores_set_idle( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ pmm->cores_idle |= (cores);
+ return pmm->cores_idle;
+}
+
+mali_pmm_core_mask pmm_cores_set_down_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check the core is actually pending a power down */
+ MALI_DEBUG_ASSERT( (pmm->cores_pend_down & cores) != 0 );
+ /* Check core has not acknowledged power down more than once */
+ MALI_DEBUG_ASSERT( (pmm->cores_ack_down & cores) == 0 );
+
+ pmm->cores_ack_down |= (cores);
+
+ return pmm->cores_ack_down;
+}
+
+void pmm_fatal_reset( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_osk_notification_t *msg = NULL;
+ mali_pmm_status status;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALIPMM_DEBUG_PRINT( ("PMM: Fatal Reset called") );
+
+ MALI_DEBUG_ASSERT( pmm->status != MALI_PMM_STATUS_OFF );
+
+ /* Reset the common status */
+ pmm->waiting = 0;
+ pmm->missed = 0;
+ pmm->fatal_power_err = MALI_FALSE;
+ pmm->no_events = 0;
+ pmm->check_policy = MALI_FALSE;
+ pmm->cores_pend_down = 0;
+ pmm->cores_pend_up = 0;
+ pmm->cores_ack_down = 0;
+ pmm->cores_ack_up = 0;
+ pmm->is_dvfs_active = 0;
+#if MALI_PMM_TRACE
+ pmm->messages_sent = 0;
+ pmm->messages_received = 0;
+ pmm->imessages_sent = 0;
+ pmm->imessages_received = 0;
+ MALI_PRINT( ("PMM Trace: *** Fatal reset occurred ***") );
+#endif
+
+ /* Set that we are unavailable whilst resetting */
+ pmm->state = MALI_PMM_STATE_UNAVAILABLE;
+ status = pmm->status;
+ pmm->status = MALI_PMM_STATUS_OFF;
+
+ /* We want all cores powered */
+ pmm->cores_powered = pmm->cores_registered;
+ /* The cores may not be idle, but this state will be rectified later */
+ pmm->cores_idle = pmm->cores_registered;
+
+ /* So power on any cores that are registered */
+ if( pmm->cores_registered != 0 )
+ {
+ int n;
+ volatile mali_pmm_core_mask *pregistered = &(pmm->cores_registered);
+#if !MALI_PMM_NO_PMU
+ err = mali_platform_powerup( pmm->cores_registered );
+#endif
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ /* This is very bad as we can't even be certain the cores are now
+ * powered up
+ */
+ MALI_PRINT_ERROR( ("PMM: Failed to perform PMM reset!\n") );
+ /* TBD driver exit? */
+ }
+
+ for( n = SIZEOF_CORES_LIST-1; n >= 0; n-- )
+ {
+ if( (cores_list[n] & (*pregistered)) != 0 )
+ {
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ MALI_PMM_UNLOCK(pmm);
+ /* Core is now active - so try putting it in the idle queue */
+ err = mali_core_signal_power_up( cores_list[n], MALI_FALSE );
+ MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+ pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ /* We either succeeded, or we were not off anyway, or we have
+ * just been deregistered
+ */
+ MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_OK) ||
+ (err == _MALI_OSK_ERR_BUSY) ||
+ (err == _MALI_OSK_ERR_FAULT &&
+ (*pregistered & cores_list[n]) == 0) );
+ }
+ }
+ }
+
+ /* Unblock any pending OS event */
+ if( status == MALI_PMM_STATUS_OS_POWER_UP )
+ {
+ /* Get the OS data and respond to the power up */
+ _mali_osk_pmm_power_up_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ if( status == MALI_PMM_STATUS_OS_POWER_DOWN )
+ {
+ /* Get the OS data and respond to the power down
+ * NOTE: We are not powered down at this point due to power problems,
+ * so we are lying to the system, but something bad has already
+ * happened and we are trying to unstick things
+ * TBD - Add busy loop to power down cores?
+ */
+ _mali_osk_pmm_power_down_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+
+ /* Purge the event queues - delete every queued message */
+ while( _mali_osk_notification_queue_dequeue( pmm->iqueue, &msg ) == _MALI_OSK_ERR_OK )
+ {
+ _mali_osk_notification_delete ( msg );
+ }
+
+ while( _mali_osk_notification_queue_dequeue( pmm->queue, &msg ) == _MALI_OSK_ERR_OK )
+ {
+ _mali_osk_notification_delete ( msg );
+ }
+
+ /* Return status/state to normal */
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ pmm_update_system_state(pmm);
+}
+
+mali_pmm_core_mask pmm_cores_set_up_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+ /* Check the core is actually pending a power up */
+ MALI_DEBUG_ASSERT( (pmm->cores_pend_up & cores) != 0 );
+ /* Check core has not acknowledged power up more than once */
+ MALI_DEBUG_ASSERT( (pmm->cores_ack_up & cores) == 0 );
+
+ pmm->cores_ack_up |= (cores);
+
+ return pmm->cores_ack_up;
+}
+
+void pmm_save_os_event_data(_mali_pmm_internal_state_t *pmm, mali_pmm_message_data data)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ /* Check that there is no saved data */
+ MALI_DEBUG_ASSERT( pmm->os_data == 0 );
+ /* Can't store zero data - as retrieve check will fail */
+ MALI_DEBUG_ASSERT( data != 0 );
+
+ pmm->os_data = data;
+}
+
+mali_pmm_message_data pmm_retrieve_os_event_data(_mali_pmm_internal_state_t *pmm)
+{
+ mali_pmm_message_data data;
+
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ /* Check that there is saved data */
+ MALI_DEBUG_ASSERT( pmm->os_data != 0 );
+
+ /* Get data, and clear the saved version */
+ data = pmm->os_data;
+ pmm->os_data = 0;
+
+ return data;
+}
+
+/* Create a list of core names to look up.
+ * We do it this way to avoid the need for string
+ * allocation or stack space, so we use constant
+ * strings instead
+ */
+typedef struct pmm_trace_corelist
+{
+ mali_pmm_core_mask id;
+ const char *name;
+} pmm_trace_corelist_t;
+
+static pmm_trace_corelist_t pmm_trace_cores[] = {
+ { MALI_PMM_CORE_SYSTEM, "SYSTEM" },
+ { MALI_PMM_CORE_GP, "GP" },
+ { MALI_PMM_CORE_L2, "L2" },
+ { MALI_PMM_CORE_PP0, "PP0" },
+ { MALI_PMM_CORE_PP1, "PP1" },
+ { MALI_PMM_CORE_PP2, "PP2" },
+ { MALI_PMM_CORE_PP3, "PP3" },
+ { MALI_PMM_CORE_PP_ALL, "PP (all)" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_L2 | MALI_PMM_CORE_PP0),
+ "GP+L2+PP0" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0),
+ "GP+PP0" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_L2 | MALI_PMM_CORE_PP0 | MALI_PMM_CORE_PP1),
+ "GP+L2+PP0+PP1" },
+ { (MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0 | MALI_PMM_CORE_PP1),
+ "GP+PP0+PP1" },
+ { 0, NULL } /* Terminator of list */
+};
+
+const char *pmm_trace_get_core_name( mali_pmm_core_mask cores )
+{
+ const char *dname = NULL;
+ int cl;
+
+ /* Look up name in corelist */
+ cl = 0;
+ while( pmm_trace_cores[cl].name != NULL )
+ {
+ if( pmm_trace_cores[cl].id == cores )
+ {
+ dname = pmm_trace_cores[cl].name;
+ break;
+ }
+ cl++;
+ }
+
+ if( dname == NULL )
+ {
+ /* We don't know a good short-hand for the configuration */
+ dname = "[multi-core]";
+ }
+
+ return dname;
+}
+
+#endif /* USING_MALI_PMM */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h
new file mode 100644
index 00000000000..3c8c31f066d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_state.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_state.h
+ * Defines the internal power management module state
+ */
+
+#ifndef __MALI_PMM_STATE_H__
+#define __MALI_PMM_STATE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_state Power Management Module State
+ *
+ * @{
+ */
+
+/* Check that the subset is really a subset of cores */
+#define MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( cores, subset ) \
+ MALI_DEBUG_ASSERT( ((~(cores)) & (subset)) == 0 )
+
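+/* Example (pure bit arithmetic): cores = 0x7, subset = 0x5 gives
+ * (~0x7 & 0x5) == 0, so the assert passes; subset = 0x9 would trip it,
+ * since bit 0x8 lies outside the cores mask.
+ */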
+
+/* Locking macros */
+#define MALI_PMM_LOCK(pmm) \
+ _mali_osk_lock_wait( pmm->lock, _MALI_OSK_LOCKMODE_RW )
+#define MALI_PMM_UNLOCK(pmm) \
+ _mali_osk_lock_signal( pmm->lock, _MALI_OSK_LOCKMODE_RW )
+#define MALI_PMM_LOCK_TERM(pmm) \
+ _mali_osk_lock_term( pmm->lock )
+
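+/* Usage note: mali_pmm_state.c drops this lock around calls that take the
+ * subsystem mutex, keeping the lock order subsystem-mutex-then-power-lock:
+ *
+ * MALI_PMM_UNLOCK(pmm);
+ * mali_core_signal_power_down( core, immediate_only );
+ * MALI_PMM_LOCK(pmm);
+ */
+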
+/* Notification type for messages */
+#define MALI_PMM_NOTIFICATION_TYPE 0
+
+/** @brief Status of the PMM state machine
+ */
+typedef enum mali_pmm_status_tag
+{
+ MALI_PMM_STATUS_IDLE, /**< PMM is waiting for the next event */
+ MALI_PMM_STATUS_POLICY_POWER_DOWN, /**< Policy initiated power down */
+ MALI_PMM_STATUS_POLICY_POWER_UP, /**< Policy initiated power up */
+ MALI_PMM_STATUS_OS_WAITING, /**< PMM is waiting for OS power up */
+ MALI_PMM_STATUS_OS_POWER_DOWN, /**< OS initiated power down */
+ MALI_PMM_STATUS_RUNTIME_IDLE_IN_PROGRESS, /**< Runtime PM idle in progress */
+ MALI_PMM_STATUS_DVFS_PAUSE, /**< PMM DVFS Status Pause */
+ MALI_PMM_STATUS_OS_POWER_UP, /**< OS initiated power up */
+ MALI_PMM_STATUS_OFF, /**< PMM is not active */
+} mali_pmm_status;
+
+
+/** @brief Internal state of the PMM
+ */
+typedef struct _mali_pmm_internal_state
+{
+ mali_pmm_status status; /**< PMM state machine */
+ mali_pmm_policy policy; /**< PMM policy */
+ mali_bool check_policy; /**< PMM policy needs checking */
+ mali_pmm_state state; /**< PMM state */
+ mali_pmm_core_mask cores_registered; /**< Bitmask of cores registered */
+ mali_pmm_core_mask cores_powered; /**< Bitmask of cores powered up */
+ mali_pmm_core_mask cores_idle; /**< Bitmask of cores idle */
+ mali_pmm_core_mask cores_pend_down; /**< Bitmask of cores pending power down */
+ mali_pmm_core_mask cores_pend_up; /**< Bitmask of cores pending power up */
+ mali_pmm_core_mask cores_ack_down; /**< Bitmask of cores acknowledged power down */
+ mali_pmm_core_mask cores_ack_up; /**< Bitmask of cores acknowledged power up */
+
+ _mali_osk_notification_queue_t *queue; /**< PMM event queue */
+ _mali_osk_notification_queue_t *iqueue; /**< PMM internal event queue */
+ _mali_osk_irq_t *irq; /**< PMM irq handler */
+ _mali_osk_lock_t *lock; /**< PMM lock */
+
+ mali_pmm_message_data os_data; /**< OS data sent via the OS events */
+
+ mali_bool pmu_initialized; /**< PMU initialized */
+
+ _mali_osk_atomic_t messages_queued; /**< PMM event messages queued */
+ u32 waiting; /**< PMM waiting events - due to busy */
+ u32 no_events; /**< PMM called to process when no events */
+
+ u32 missed; /**< PMM missed events due to OOM */
+ mali_bool fatal_power_err; /**< PMM has had a fatal power error? */
+ u32 is_dvfs_active; /**< PMM DVFS activity */
+
+#if (defined(DEBUG) || MALI_STATE_TRACKING)
+ u32 mali_last_pmm_status;
+ u32 mali_new_event_status;
+ u32 mali_pmm_lock_acquired;
+#endif
+
+#if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+ u32 messages_sent; /**< Total event messages sent */
+ u32 messages_received; /**< Total event messages received */
+ u32 imessages_sent; /**< Total event internal messages sent */
+ u32 imessages_received; /**< Total event internal messages received */
+#endif
+} _mali_pmm_internal_state_t;
+
+/** @brief Sets that a policy needs a check before processing events
+ *
+ * A timer or similar has expired and needs dealing with
+ */
+void malipmm_set_policy_check(void);
+
+/** @brief Update the PMM externally viewable state depending on the current PMM internal state
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_update_system_state( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Returns the core mask from the event data - if applicable
+ *
+ * @param pmm internal PMM state
+ * @param event event message to get the core mask from
+ * @return mask of cores that is relevant to this event message
+ */
+mali_pmm_core_mask pmm_cores_from_event_data( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @brief Sort out which cores need to be powered up from the given core mask
+ *
+ * All cores that can be powered up will be put into a pending state
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to check if they need to be powered up
+ * @return mask of cores that need to be powered up; this can be 0 if all cores
+ * are powered up already
+ */
+mali_pmm_core_mask pmm_cores_to_power_up( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Sort out which cores need to be powered down from the given core mask
+ *
+ * All cores that can be powered down will be put into a pending state. If they
+ * can be powered down immediately they will also be acknowledged that they can be
+ * powered down. If the immediate_only flag is set, then only those cores that
+ * can be acknowledged for power down will be put into a pending state.
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to check if they need to be powered down
+ * @param immediate_only MALI_TRUE means that only cores that can power down now will
+ * be put into a pending state
+ * @return mask of cores that need to be powered down; this can be 0 if all cores
+ * are powered down already
+ */
+mali_pmm_core_mask pmm_cores_to_power_down( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores, mali_bool immediate_only );
+
+/** @brief Cancel an invocation of power down (pmm_invoke_power_down)
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_power_down_cancel( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Check if a call to invoke power down should succeed, or fail
+ *
+ * This will report MALI_FALSE if some of the cores are still active and need
+ * to acknowledge that they are ready to power down
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores to power down have acknowledged they
+ * can power down, else MALI_FALSE
+ */
+mali_bool pmm_power_down_okay( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Try to make all the pending cores power down
+ *
+ * If all the pending cores have acknowledged they can power down, this will call the
+ * PMU power down function to turn them off
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores have been powered down, else MALI_FALSE
+ */
+mali_bool pmm_invoke_power_down( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Check if all the pending cores to power up have done so
+ *
+ * This will report MALI_FALSE if some of the cores are still powered off
+ * and have not acknowledged that they have powered up
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores to power up have acknowledged they
+ * are now powered up, else MALI_FALSE
+ */
+mali_bool pmm_power_up_okay( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Try to make all the pending cores power up
+ *
+ * If all the pending cores have acknowledged they have powered up, this will
+ * make the cores start processing jobs again, else this will call the PMU
+ * power up function to turn them on, and the PMM is then expected to wait for an
+ * interrupt to acknowledge the power up
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores have been powered up, else MALI_FALSE
+ */
+mali_bool pmm_invoke_power_up( _mali_pmm_internal_state_t *pmm );
+
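+/* Illustrative caller sketch (it follows the job control policy;
+ * "acked_cores" is shorthand for the mask in the acknowledge event):
+ *
+ * if( pmm_cores_to_power_up( pmm, cores ) != 0 &&
+ * !pmm_invoke_power_up( pmm ) )
+ * {
+ * // The PMU has started powering up - wait for
+ * // MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK, then:
+ * pmm_cores_set_up_ack( pmm, acked_cores );
+ * pmm_invoke_power_up( pmm ); // now moves the cores to the idle queues
+ * }
+ */
+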
+/** @brief Set the cores that are now active in the system
+ *
+ * Updates which cores are active and returns which cores are still idle
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to set to active
+ * @return mask of all the cores that are idle
+ */
+mali_pmm_core_mask pmm_cores_set_active( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that are now idle in the system
+ *
+ * Updates which cores are idle and returns which cores are still idle
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to set to idle
+ * @return mask of all the cores that are idle
+ */
+mali_pmm_core_mask pmm_cores_set_idle( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that have acknowledged a pending power down
+ *
+ * Updates which cores have acknowledged the pending power down and are now ready
+ * to be turned off
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores that have acknowledged the pending power down
+ * @return mask of all the cores that have acknowledged the power down
+ */
+mali_pmm_core_mask pmm_cores_set_down_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that have acknowledged a pending power up
+ *
+ * Updates which cores have acknowledged the pending power up and are now
+ * fully powered and ready to run jobs
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores that have acknowledged the pending power up
+ * @return mask of all the cores that have acknowledged the power up
+ */
+mali_pmm_core_mask pmm_cores_set_up_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+
+/** @brief Tries to reset the PMM and PMU hardware to a known state after any fatal issues
+ *
+ * This will try and make all the cores powered up and reset the PMM state
+ * to its initial state after core registration - all cores powered but not
+ * pending or active.
+ * All events in the event queues will be thrown away.
+ *
+ * @note Any pending power down will be cancelled, including an OS-requested power down
+ */
+void pmm_fatal_reset( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Save the OS specific data for an OS power up/down event
+ *
+ * @param pmm internal PMM state
+ * @param data OS specific event data
+ */
+void pmm_save_os_event_data(_mali_pmm_internal_state_t *pmm, mali_pmm_message_data data);
+
+/** @brief Retrieve the OS specific data for an OS power up/down event
+ *
+ * This will clear the stored OS data, as well as return it.
+ *
+ * @param pmm internal PMM state
+ * @return OS specific event data that was saved previously
+ */
+mali_pmm_message_data pmm_retrieve_os_event_data(_mali_pmm_internal_state_t *pmm);
+
+
+/** @brief Get a human readable name for the cores in a core mask
+ *
+ * @param core the core mask
+ * @return string containing a name relating to the given core mask
+ */
+const char *pmm_trace_get_core_name( mali_pmm_core_mask core );
+
+/** @} */ /* End group pmmapi_state */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_STATE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h
new file mode 100644
index 00000000000..f5106479e33
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/common/pmm/mali_pmm_system.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_system.h
+ * Defines the power management module system functions
+ */
+
+#ifndef __MALI_PMM_SYSTEM_H__
+#define __MALI_PMM_SYSTEM_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_system Power Management Module System Functions
+ *
+ * @{
+ */
+
+extern struct mali_kernel_subsystem mali_subsystem_pmm;
+
+/** @brief Register a core with the PMM, which will power up
+ * the core
+ *
+ * @param core the core to register with the PMM
+ * @return error if the core cannot be powered up
+ */
+_mali_osk_errcode_t malipmm_core_register( mali_pmm_core_id core );
+
+/** @brief Unregister a core with the PMM
+ *
+ * @param core the core to unregister with the PMM
+ */
+void malipmm_core_unregister( mali_pmm_core_id core );
+
+/** @brief Acknowledge that a power down is okay to happen
+ *
+ * A core should not be running a job, or be in the idle queue when this
+ * is called.
+ *
+ * @param core the core that can now be powered down
+ */
+void malipmm_core_power_down_okay( mali_pmm_core_id core );
+
+/** @} */ /* End group pmmapi_system */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_SYSTEM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h
new file mode 100644
index 00000000000..e9e5e55a082
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/license/gpl/mali_kernel_license.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE "GPL"
+#define MALI_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c
new file mode 100644
index 00000000000..1c1bf306688
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.c
@@ -0,0 +1,82 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+#if USING_MALI_PMM
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+#include "mali_linux_pm.h"
+#include "mali_device_pause_resume.h"
+#include "mali_pmm.h"
+#include "mali_kernel_license.h"
+#ifdef CONFIG_PM
+#if MALI_LICENSE_IS_GPL
+
+/* Mali Pause Resume APIs */
+int mali_dev_pause(void)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_SUSPEND) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND)
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ || (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+#else
+ )
+#endif
+ {
+ err = -EPERM;
+ }
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) && (!err))
+ {
+ mali_device_suspend(MALI_PMM_EVENT_DVFS_PAUSE, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_SUSPEND;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_pause);
+
+int mali_dev_resume(void)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND)
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ || (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+#else
+ )
+#endif
+ {
+ err = -EPERM;
+ }
+ if (!err)
+ {
+ mali_device_resume(MALI_PMM_EVENT_DVFS_RESUME, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_resume);
+
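+/* Expected usage (an illustrative sketch, not taken from a real caller):
+ * a DVFS governor pauses the GPU before changing its operating point.
+ *
+ * if (mali_dev_pause() == 0)
+ * {
+ * // GPU is quiescent - safe to change clocks/voltage here
+ * mali_dev_resume();
+ * }
+ * // -EPERM means the device was already suspended or mid-transition
+ */
+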
+#endif /* MALI_LICENSE_IS_GPL */
+#endif /* CONFIG_PM */
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h
new file mode 100644
index 00000000000..5362f88cdbd
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_device_pause_resume.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DEVICE_PAUSE_RESUME_H__
+#define __MALI_DEVICE_PAUSE_RESUME_H__
+
+#if USING_MALI_PMM
+int mali_dev_pause(void);
+int mali_dev_resume(void);
+#endif /* USING_MALI_PMM */
+
+#endif /* __MALI_DEVICE_PAUSE_RESUME_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h
new file mode 100644
index 00000000000..30a6fa041db
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_ioctl.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_IOCTL_H__
+#define __MALI_KERNEL_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h> /* file system operations */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @file mali_kernel_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE 0x82
+#define MALI_IOC_CORE_BASE (_MALI_UK_CORE_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE (_MALI_UK_MEMORY_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE (_MALI_UK_PP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE (_MALI_UK_GP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_GET_SYSTEM_INFO_SIZE _IOR (MALI_IOC_CORE_BASE, _MALI_UK_GET_SYSTEM_INFO_SIZE, _mali_uk_get_system_info_s *)
+#define MALI_IOC_GET_SYSTEM_INFO _IOR (MALI_IOC_CORE_BASE, _MALI_UK_GET_SYSTEM_INFO, _mali_uk_get_system_info_s *)
+#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *)
+#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
+#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
+#define MALI_IOC_MEM_GET_BIG_BLOCK _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, _mali_uk_get_big_block_s *)
+#define MALI_IOC_MEM_FREE_BIG_BLOCK _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, _mali_uk_free_big_block_s *)
+#define MALI_IOC_MEM_INIT _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, _mali_uk_init_mem_s *)
+#define MALI_IOC_MEM_TERM _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, _mali_uk_term_mem_s *)
+#define MALI_IOC_MEM_MAP_EXT _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
+#define MALI_IOC_MEM_UNMAP_EXT _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
+#define MALI_IOC_MEM_ATTACH_UMP _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
+#define MALI_IOC_MEM_RELEASE_UMP _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
+#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
+#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
+#define MALI_IOC_PP_ABORT_JOB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_ABORT_JOB, _mali_uk_pp_abort_job_s * )
+#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
+#define MALI_IOC_GP2_ABORT_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_ABORT_JOB, _mali_uk_gp_abort_job_s *)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
+#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+#define MALI_IOC_PROFILING_START _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
+#define MALI_IOC_PROFILING_STOP _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
+#define MALI_IOC_PROFILING_GET_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
+#define MALI_IOC_PROFILING_CLEAR _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
+#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
+
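+/* Illustrative user-space call (assumes the /dev/mali node created by the
+ * driver and omits error handling):
+ *
+ * _mali_uk_get_api_version_s args = {0};
+ * int fd = open("/dev/mali", O_RDWR);
+ * ioctl(fd, MALI_IOC_GET_API_VERSION, &args);
+ * close(fd);
+ */
+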
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_IOCTL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c
new file mode 100644
index 00000000000..a5144faae2b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.c
@@ -0,0 +1,565 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/mm.h> /* memory manager definitions */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/device.h>
+#include <linux/proc_fs.h>
+
+/* the mali kernel subsystem types */
+#include "mali_kernel_subsystem.h"
+
+/* A memory subsystem always exists, so no need to conditionally include it */
+#include "mali_kernel_common.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_core.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_kernel_ioctl.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_pm.h"
+
+/* */
+#include "mali_kernel_license.h"
+
+/* Setting this parameter will override memory settings in arch/config.h */
+char *mali_mem = "";
+module_param(mali_mem, charp, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); /* rw-r--r-- */
+MODULE_PARM_DESC(mali_mem, "Override mali memory configuration. e.g. 32M@224M or 64M@OS_MEMORY");
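+/* Example (illustrative): insmod mali.ko mali_mem=64M@OS_MEMORY */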
+
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int mali_major = 0;
+module_param(mali_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(mali_major, "Device major number");
+
+int mali_benchmark = 0;
+module_param(mali_benchmark, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_benchmark, "Bypass Mali hardware when non-zero");
+
+extern int mali_hang_check_interval;
+module_param(mali_hang_check_interval, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_hang_check_interval, "Interval at which to check for progress after the hw watchdog has been triggered");
+
+extern int mali_max_job_runtime;
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+#if defined(USING_MALI400_L2_CACHE)
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+#endif
+
+struct mali_dev
+{
+ struct cdev cdev;
+#if MALI_LICENSE_IS_GPL
+ struct class * mali_class;
+#endif
+};
+
+static char mali_dev_name[] = "mali"; /* should be const, but the functions we call requires non-cost */
+
+/* the mali device */
+static struct mali_dev device;
+
+#if MALI_STATE_TRACKING
+static struct proc_dir_entry *proc_entry;
+static int mali_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data);
+#endif
+
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma);
+
+/* Linux char file operations provided by the Mali module */
+struct file_operations mali_fops =
+{
+ .owner = THIS_MODULE,
+ .open = mali_open,
+ .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = mali_ioctl,
+#else
+ .ioctl = mali_ioctl,
+#endif
+ .mmap = mali_mmap
+};
+
+
+int mali_driver_init(void)
+{
+ int err;
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ err = _mali_dev_platform_register();
+ if (err)
+ {
+ return err;
+ }
+#endif
+#endif
+#endif
+ err = mali_kernel_constructor();
+ if (_MALI_OSK_ERR_OK != err)
+ {
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ _mali_dev_platform_unregister();
+#endif
+#endif
+#endif
+ MALI_PRINT(("Failed to initialize driver (error %d)\n", err));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void mali_driver_exit(void)
+{
+
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+
+ _mali_osk_pmm_dev_activate();
+#endif
+#endif
+#endif
+#endif
+ mali_kernel_destructor();
+
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ _mali_osk_pmm_dev_idle();
+#endif
+#endif
+#endif
+#endif
+
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ _mali_dev_platform_unregister();
+#endif
+#endif
+#endif
+}
+
+/* called from _mali_osk_init */
+int initialize_kernel_device(void)
+{
+ int err;
+ dev_t dev = 0;
+ if (0 == mali_major)
+ {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0/*first minor*/, 1/*count*/, mali_dev_name);
+ mali_major = MAJOR(dev);
+ }
+ else
+ {
+ /* use load time defined major number */
+ dev = MKDEV(mali_major, 0);
+ err = register_chrdev_region(dev, 1/*count*/, mali_dev_name);
+ }
+
+ if (0 == err)
+ {
+ memset(&device, 0, sizeof(device));
+
+ /* initialize our char dev data */
+ cdev_init(&device.cdev, &mali_fops);
+ device.cdev.owner = THIS_MODULE;
+ device.cdev.ops = &mali_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&device.cdev, dev, 1/*count*/);
+
+ if (0 == err)
+ {
+#if MALI_STATE_TRACKING
+ proc_entry = create_proc_entry(mali_dev_name, 0444, NULL);
+ if (proc_entry != NULL)
+ {
+ proc_entry->read_proc = mali_proc_read;
+#endif
+#if MALI_LICENSE_IS_GPL
+ device.mali_class = class_create(THIS_MODULE, mali_dev_name);
+ if (IS_ERR(device.mali_class))
+ {
+ err = PTR_ERR(device.mali_class);
+ }
+ else
+ {
+ struct device * mdev;
+ mdev = device_create(device.mali_class, NULL, dev, NULL, mali_dev_name);
+ if (!IS_ERR(mdev))
+ {
+ return 0;
+ }
+
+ err = PTR_ERR(mdev);
+ }
+ cdev_del(&device.cdev);
+#else
+ return 0;
+#endif
+#if MALI_STATE_TRACKING
+ remove_proc_entry(mali_dev_name, NULL);
+ }
+ else
+ {
+ err = -EFAULT;
+ }
+#endif
+ }
+ unregister_chrdev_region(dev, 1/*count*/);
+ }
+
+
+ return err;
+}
+
+/* called from _mali_osk_term */
+void terminate_kernel_device(void)
+{
+ dev_t dev = MKDEV(mali_major, 0);
+
+#if MALI_LICENSE_IS_GPL
+ device_destroy(device.mali_class, dev);
+ class_destroy(device.mali_class);
+#endif
+
+#if MALI_STATE_TRACKING
+ remove_proc_entry(mali_dev_name, NULL);
+#endif
+
+ /* unregister char device */
+ cdev_del(&device.cdev);
+ /* free major */
+ unregister_chrdev_region(dev, 1/*count*/);
+ return;
+}
+
+/** @note munmap handling is performed by the vma close handler */
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ struct mali_session_data * session_data;
+ _mali_uk_mem_mmap_s args = {0, };
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(3, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X\n", (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT), (unsigned int)(vma->vm_end - vma->vm_start)) );
+
+ /* Re-pack the arguments that mmap() packed for us */
+ args.ctx = session_data;
+ args.phys_addr = vma->vm_pgoff << PAGE_SHIFT;
+ args.size = vma->vm_end - vma->vm_start;
+ args.ukk_private = vma;
+
+ /* Call the common mmap handler */
+ MALI_CHECK(_MALI_OSK_ERR_OK == _mali_ukk_mem_mmap( &args ), -EFAULT);
+
+ return 0;
+}
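+
+/*
+ * Illustrative sketch of the user-space side (assumed, not taken from the
+ * original sources): the mmap() offset carries the Mali physical address
+ * that the handler above unpacks from vma->vm_pgoff:
+ *
+ *   int fd = open("/dev/mali", O_RDWR);
+ *   void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *                  fd, phys_addr);
+ *
+ * "size" and "phys_addr" are assumed to come from the memory ioctls; the
+ * device node name depends on how udev names the "mali" class device.
+ */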
+
+static int mali_open(struct inode *inode, struct file *filp)
+{
+ struct mali_session_data * session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev)) return -ENODEV;
+
+ /* allocate a struct to track this session */
+ err = _mali_ukk_open((void **)&session_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* initialize file pointer */
+ filp->f_pos = 0;
+
+ /* link in our session data */
+ filp->private_data = (void*)session_data;
+
+ return 0;
+}
+
+static int mali_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev)) return -ENODEV;
+
+ err = _mali_ukk_close((void **)&filp->private_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int map_errcode( _mali_osk_errcode_t err )
+{
+ switch(err)
+ {
+ case _MALI_OSK_ERR_OK : return 0;
+ case _MALI_OSK_ERR_FAULT: return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+ default: return -EFAULT;
+ }
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err;
+ struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ /* inode not used */
+ (void)inode;
+#endif
+
+ MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+ return -ENOTTY;
+ }
+ if (NULL == (void *)arg)
+ {
+ MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+ return -ENOTTY;
+ }
+
+ switch(cmd)
+ {
+ case MALI_IOC_GET_SYSTEM_INFO_SIZE:
+ err = get_system_info_size_wrapper(session_data, (_mali_uk_get_system_info_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_SYSTEM_INFO:
+ err = get_system_info_wrapper(session_data, (_mali_uk_get_system_info_s __user *)arg);
+ break;
+
+ case MALI_IOC_WAIT_FOR_NOTIFICATION:
+ err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_API_VERSION:
+ err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_POST_NOTIFICATION:
+ err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+ break;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ case MALI_IOC_PROFILING_START:
+ err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_ADD_EVENT:
+ err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_STOP:
+ err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_GET_EVENT:
+ err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_CLEAR:
+ err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
+ break;
+#endif
+
+ case MALI_IOC_MEM_INIT:
+ err = mem_init_wrapper(session_data, (_mali_uk_init_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_TERM:
+ err = mem_term_wrapper(session_data, (_mali_uk_term_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_MAP_EXT:
+ err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_UNMAP_EXT:
+ err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+ err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+ err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_GET_BIG_BLOCK:
+ err = mem_get_big_block_wrapper(filp, (_mali_uk_get_big_block_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_FREE_BIG_BLOCK:
+ err = mem_free_big_block_wrapper(session_data, (_mali_uk_free_big_block_s __user *)arg);
+ break;
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_RELEASE_UMP:
+ err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
+ break;
+
+#else
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ case MALI_IOC_MEM_RELEASE_UMP: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+ case MALI_IOC_PP_START_JOB:
+ err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_ABORT_JOB:
+ err = pp_abort_job_wrapper(session_data, (_mali_uk_pp_abort_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+ err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_CORE_VERSION_GET:
+ err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_START_JOB:
+ err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_ABORT_JOB:
+ err = gp_abort_job_wrapper(session_data, (_mali_uk_gp_abort_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+ err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_CORE_VERSION_GET:
+ err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_SUSPEND_RESPONSE:
+ err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+ break;
+
+ case MALI_IOC_VSYNC_EVENT_REPORT:
+ err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+ err = -ENOTTY;
+ }
+
+ return err;
+}
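+
+/*
+ * Illustrative user-space sketch (assumed, not part of the original
+ * sources): each command above is issued as a single ioctl() with a
+ * command-specific argument struct, e.g.
+ *
+ *   _mali_uk_get_api_version_s args = {0};
+ *   if (ioctl(fd, MALI_IOC_GET_API_VERSION, &args) < 0)
+ *       perror("MALI_IOC_GET_API_VERSION");
+ *
+ * The *_wrapper() functions are expected to copy the struct between user
+ * and kernel space before and after calling the matching _mali_ukk_*()
+ * entry point.
+ */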
+
+#if MALI_STATE_TRACKING
+static int mali_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len = 0;
+
+ MALI_DEBUG_PRINT(1, ("mali_proc_read(page=%p, start=%p, off=%u, count=%d, eof=%p, data=%p\n", page, start, off, count, eof, data));
+
+ if (off > 0)
+ {
+ return 0;
+ }
+
+ if (count < 1024)
+ {
+ return 0;
+ }
+
+ len = sprintf(page + len, "Mali device driver %s\n", SVN_REV_STRING);
+ len += sprintf(page + len, "License: %s\n", MALI_KERNEL_LINUX_LICENSE);
+
+ /*
+ * A more elegant solution would be to gather information from all subsystems and
+ * then report it all in the /proc/mali file, but this would require a bit more work.
+ * Use MALI_PRINT for now so we get the information in the dmesg log at least.
+ */
+ _mali_kernel_core_dump_state();
+
+ return len;
+}
+#endif
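+
+/*
+ * Example (illustrative): with MALI_STATE_TRACKING enabled the entry can be
+ * exercised from user space; the detailed state dump goes to the kernel log:
+ *
+ *   cat /proc/mali     # prints driver version and license
+ *   dmesg | tail       # shows the _mali_kernel_core_dump_state() output
+ */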
+
+
+module_init(mali_driver_init);
+module_exit(mali_driver_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h
new file mode 100644
index 00000000000..969449d8ef4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_linux.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+int initialize_kernel_device(void);
+void terminate_kernel_device(void);
+
+void mali_osk_low_level_mem_init(void);
+void mali_osk_low_level_mem_term(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c
new file mode 100644
index 00000000000..aaafc151077
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.c
@@ -0,0 +1,786 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_pm.c
+ * Implementation of the Linux Power Management for Mali GPU kernel driver
+ */
+#undef CONFIG_HAS_EARLYSUSPEND /* ARM will remove .early_suspend support for r2p2_rel */
+#if USING_MALI_PMM
+#include <linux/sched.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <asm/current.h>
+#include <asm/delay.h>
+#include <linux/suspend.h>
+
+#include "mali_platform.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_pmm.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_pm.h"
+#include "mali_device_pause_resume.h"
+#include "mali_linux_pm.h"
+
+#if MALI_GPU_UTILIZATION
+#include "mali_kernel_utilization.h"
+#endif /* MALI_GPU_UTILIZATION */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+#ifdef CONFIG_PM
+#include "mali_linux_pm_testsuite.h"
+unsigned int pwr_mgmt_status_reg = 0;
+#endif /* CONFIG_PM */
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+#if MALI_STATE_TRACKING
+ int is_os_pmm_thread_waiting = -1;
+#endif /* MALI_STATE_TRACKING */
+
+/* kernel should be configured with power management support */
+#ifdef CONFIG_PM
+
+/* License should be GPL */
+#if MALI_LICENSE_IS_GPL
+
+/* Linux kernel major version */
+#define LINUX_KERNEL_MAJOR_VERSION 2
+
+/* Linux kernel minor version */
+#define LINUX_KERNEL_MINOR_VERSION 6
+
+/* Linux kernel development version */
+#define LINUX_KERNEL_DEVELOPMENT_VERSION 29
+
+#ifdef CONFIG_PM_DEBUG
+static const char* const mali_states[_MALI_MAX_DEBUG_OPERATIONS] = {
+ [_MALI_DEVICE_SUSPEND] = "suspend",
+ [_MALI_DEVICE_RESUME] = "resume",
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ [_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB] = "early_suspend_level_disable_framebuffer",
+ [_MALI_DEVICE_LATERESUME] = "late_resume",
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ [_MALI_DVFS_PAUSE_EVENT] = "dvfs_pause",
+ [_MALI_DVFS_RESUME_EVENT] = "dvfs_resume",
+};
+
+#endif /* CONFIG_PM_DEBUG */
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+extern void set_mali_parent_power_domain(struct platform_device* dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifndef CONFIG_HAS_EARLYSUSPEND
+static int mali_pwr_suspend_notifier(struct notifier_block *nb,unsigned long event,void* dummy);
+
+static struct notifier_block mali_pwr_notif_block = {
+ .notifier_call = mali_pwr_suspend_notifier
+};
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/* Power management thread pointer */
+struct task_struct *pm_thread;
+
+/* dvfs power management thread */
+struct task_struct *dvfs_pm_thread;
+
+/* is wake up needed */
+short is_wake_up_needed = 0;
+int timeout_fired = 2;
+unsigned int is_mali_pmm_testsuite_enabled = 0;
+
+_mali_device_power_states mali_device_state = _MALI_DEVICE_RESUME;
+_mali_device_power_states mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+_mali_osk_lock_t *lock;
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+const char* const mali_pmm_recording_events[_MALI_DEVICE_MAX_PMM_EVENTS] = {
+ [_MALI_DEVICE_PMM_TIMEOUT_EVENT] = "timeout",
+ [_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS] = "job_scheduling",
+ [_MALI_DEVICE_PMM_REGISTERED_CORES] = "cores",
+};
+
+unsigned int mali_timeout_event_recording_on = 0;
+unsigned int mali_job_scheduling_events_recording_on = 0;
+unsigned int is_mali_pmu_present = 0;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+/* Function prototypes */
+static int mali_pm_probe(struct platform_device *pdev);
+static int mali_pm_remove(struct platform_device *pdev);
+
+/* Mali device suspend function */
+static int mali_pm_suspend(struct device *dev);
+
+/* Mali device resume function */
+static int mali_pm_resume(struct device *dev);
+
+/* Run time suspend and resume functions */
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+static int mali_device_runtime_suspend(struct device *dev);
+static int mali_device_runtime_resume(struct device *dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/* Early suspend functions */
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void mali_pm_early_suspend(struct early_suspend *mali_dev);
+static void mali_pm_late_resume(struct early_suspend *mali_dev);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+/* OS suspend and resume callbacks */
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifndef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_suspend(struct platform_device *pdev, pm_message_t state);
+#else
+static int mali_pm_os_suspend(struct device *dev);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_resume(struct platform_device *pdev);
+#else
+static int mali_pm_os_resume(struct device *dev);
+#endif
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+
+/* OS Hibernation suspend callback */
+static int mali_pm_os_suspend_on_hibernation(struct device *dev);
+
+/* OS Hibernation resume callback */
+static int mali_pm_os_resume_on_hibernation(struct device *dev);
+
+static void _mali_release_pm(struct device* device);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static const struct dev_pm_ops mali_dev_pm_ops = {
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ .runtime_suspend = mali_device_runtime_suspend,
+ .runtime_resume = mali_device_runtime_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ .suspend = mali_pm_os_suspend,
+ .resume = mali_pm_os_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+ .freeze = mali_pm_os_suspend_on_hibernation,
+ .poweroff = mali_pm_os_suspend_on_hibernation,
+ .thaw = mali_pm_os_resume_on_hibernation,
+ .restore = mali_pm_os_resume_on_hibernation,
+};
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+struct pm_ext_ops mali_pm_operations = {
+ .base = {
+ .freeze = mali_pm_os_suspend_on_hibernation,
+ .thaw = mali_pm_os_resume_on_hibernation,
+ .poweroff = mali_pm_os_suspend_on_hibernation,
+ .restore = mali_pm_os_resume_on_hibernation,
+ },
+};
+#endif
+
+static struct platform_driver mali_plat_driver = {
+ .probe = mali_pm_probe,
+ .remove = mali_pm_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ .suspend = mali_pm_os_suspend,
+ .resume = mali_pm_os_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+ .pm = &mali_pm_operations,
+#endif
+
+ .driver = {
+ .name = "mali_dev",
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+ .pm = &mali_dev_pm_ops,
+#endif
+ },
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/* Early suspend hooks */
+static struct early_suspend mali_dev_early_suspend = {
+ .suspend = mali_pm_early_suspend,
+ .resume = mali_pm_late_resume,
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
+};
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+/* Mali GPU platform device */
+struct platform_device mali_gpu_device = {
+ .name = "mali_dev",
+ .id = 0,
+ .dev.release = _mali_release_pm
+};
+
+/** This function is called when the platform device is unregistered.
+ * The driver core requires a release callback, so an empty one is provided.
+ */
+static void _mali_release_pm(struct device *device)
+{
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI Platform device removed\n" ));
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+void mali_is_pmu_present(void)
+{
+ int temp = 0;
+ temp = pmu_get_power_up_down_info();
+ if (4095 == temp)
+ {
+ is_mali_pmu_present = 0;
+ }
+ else
+ {
+ is_mali_pmu_present = 1;
+ }
+}
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+#endif /* MALI_LICENSE_IS_GPL */
+
+#if MALI_LICENSE_IS_GPL
+
+static int mali_wait_for_power_management_policy_event(void)
+{
+ int err = 0;
+ for (;;)
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ {
+ err = -EINTR;
+ break;
+ }
+ if (is_wake_up_needed == 1)
+ {
+ break;
+ }
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ is_wake_up_needed = 0;
+ return err;
+}
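+
+/*
+ * Illustrative sketch (assumed, not shown in this file): the PMM side is
+ * expected to wake the waiter roughly like this once the power event has
+ * been processed:
+ *
+ *   is_wake_up_needed = 1;
+ *   wake_up_process(pm_thread);   (or dvfs_pm_thread for DVFS events)
+ */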
+
+/** This function is invoked when the Mali device is to be suspended.
+ */
+int mali_device_suspend(unsigned int event_id, struct task_struct **pwr_mgmt_thread)
+{
+ int err = 0;
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ event_id,
+ timeout_fired};
+ *pwr_mgmt_thread = current;
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI device is being suspended\n" ));
+ _mali_ukk_pmm_event_message(&event);
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 1;
+#endif /* MALI_STATE_TRACKING */
+ err = mali_wait_for_power_management_policy_event();
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 0;
+#endif /* MALI_STATE_TRACKING */
+ return err;
+}
+
+/** This function is called when the operating system wants to power down
+ * the Mali GPU device.
+ */
+static int mali_pm_suspend(struct device *dev)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+#if MALI_GPU_UTILIZATION
+ mali_utilization_suspend();
+#endif /* MALI_GPU_UTILIZATION */
+ if ((mali_device_state == _MALI_DEVICE_SUSPEND)
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ || mali_device_state == (_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+#else
+ )
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+ }
+ mali_device_state = _MALI_DEVICE_SUSPEND_IN_PROGRESS;
+ err = mali_device_suspend(MALI_PMM_EVENT_OS_POWER_DOWN, &pm_thread);
+ mali_device_state = _MALI_DEVICE_SUSPEND;
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_suspend(struct platform_device *pdev, pm_message_t state)
+#else
+static int mali_pm_os_suspend(struct device *dev)
+#endif
+{
+ int err = 0;
+ err = mali_pm_suspend(NULL);
+ return err;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifndef CONFIG_HAS_EARLYSUSPEND
+static int mali_pwr_suspend_notifier(struct notifier_block *nb,unsigned long event,void* dummy)
+{
+ int err = 0;
+ switch (event)
+ {
+ case PM_SUSPEND_PREPARE:
+ err = mali_pm_suspend(NULL);
+ break;
+
+ case PM_POST_SUSPEND:
+ err = mali_pm_resume(NULL);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/** This function is called when the Mali GPU device is to be resumed.
+ */
+int mali_device_resume(unsigned int event_id, struct task_struct **pwr_mgmt_thread)
+{
+ int err = 0;
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ event_id,
+ timeout_fired};
+ *pwr_mgmt_thread = current;
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI device is being resumed\n" ));
+ _mali_ukk_pmm_event_message(&event);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI Power up event is scheduled\n" ));
+
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 1;
+#endif /* MALI_STATE_TRACKING */
+
+ err = mali_wait_for_power_management_policy_event();
+
+#if MALI_STATE_TRACKING
+ is_os_pmm_thread_waiting = 0;
+#endif /* MALI_STATE_TRACKING */
+
+ return err;
+}
+
+/** OS power management callback that resumes the Mali GPU device.
+ */
+static int mali_pm_resume(struct device *dev)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if (mali_device_state == _MALI_DEVICE_RESUME)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+ }
+ err = mali_device_resume(MALI_PMM_EVENT_OS_POWER_UP, &pm_thread);
+ mali_device_state = _MALI_DEVICE_RESUME;
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_resume(struct platform_device *pdev)
+#else
+static int mali_pm_os_resume(struct device *dev)
+#endif
+{
+ int err = 0;
+ err = mali_pm_resume(NULL);
+ return err;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+static int mali_pm_os_suspend_on_hibernation(struct device *dev)
+{
+ int err = 0;
+ err = mali_pm_suspend(NULL);
+ return err;
+}
+
+static int mali_pm_os_resume_on_hibernation(struct device *dev)
+{
+ int err = 0;
+ err = mali_pm_resume(NULL);
+ return err;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+/** This function is called when runtime suspend of mali device is required.
+ */
+static int mali_device_runtime_suspend(struct device *dev)
+{
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Mali device Run time suspended \n" ));
+ return 0;
+}
+
+/** This function is called when runtime resume of mali device is required.
+ */
+static int mali_device_runtime_resume(struct device *dev)
+{
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Mali device Run time Resumed \n" ));
+ return 0;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+
+/* This function is called from the Android framework on early suspend.
+ */
+static void mali_pm_early_suspend(struct early_suspend *mali_dev)
+{
+ switch(mali_dev->level)
+ {
+ /* Screen should be turned off but framebuffer will be accessible */
+ case EARLY_SUSPEND_LEVEL_BLANK_SCREEN:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Screen is off\n" ));
+ break;
+
+ case EARLY_SUSPEND_LEVEL_STOP_DRAWING:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Suspend level stop drawing\n" ));
+ break;
+
+ /* Turn off the framebuffer. In our case No Mali GPU operation */
+ case EARLY_SUSPEND_LEVEL_DISABLE_FB:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Suspend level Disable framebuffer\n" ));
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+#if MALI_GPU_UTILIZATION
+ mali_utilization_suspend();
+#endif /* MALI_GPU_UTILIZATION */
+ if ((mali_device_state == _MALI_DEVICE_SUSPEND) || (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB))
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return;
+ }
+ mali_device_suspend(MALI_PMM_EVENT_OS_POWER_DOWN, &pm_thread);
+ mali_device_state = _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB;
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Invalid Suspend Mode\n" ));
+ break;
+ }
+}
+
+/* This function is invoked from the Android framework when the Mali device
+ * needs to be resumed.
+ */
+static void mali_pm_late_resume(struct early_suspend *mali_dev)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if (mali_device_state == _MALI_DEVICE_RESUME)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return;
+ }
+ if (mali_device_state == _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB)
+ {
+ mali_device_resume(MALI_PMM_EVENT_OS_POWER_UP, &pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ mali_device_state = _MALI_DEVICE_RESUME;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+}
+
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#ifdef CONFIG_PM_DEBUG
+
+/** This function is used for debugging purposes when the user wants to see
+ * which power management operations are supported for the
+ * Mali device.
+ */
+static ssize_t show_file(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char *str = buf;
+#if !MALI_POWER_MGMT_TEST_SUITE
+ int pm_counter = 0;
+ for (pm_counter = 0; pm_counter<_MALI_MAX_DEBUG_OPERATIONS; pm_counter++)
+ {
+ str += sprintf(str, "%s ", mali_states[pm_counter]);
+ }
+#else
+ str += sprintf(str, "%d ",pwr_mgmt_status_reg);
+#endif
+ if (str != buf)
+ {
+ *(str-1) = '\n';
+ }
+ return (str-buf);
+}
+
+/** This function is called when the user wants to suspend or resume the Mali
+ * GPU device in order to simulate power up and power down events.
+ */
+static ssize_t store_file(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ int err = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend mali_dev;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+ int test_flag_dvfs = 0;
+ pwr_mgmt_status_reg = 0;
+ mali_is_pmu_present();
+
+#endif
+ if (!strncmp(buf,mali_states[_MALI_DEVICE_SUSPEND],strlen(mali_states[_MALI_DEVICE_SUSPEND])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI suspend Power operation is scheduled\n" ));
+ err = mali_pm_suspend(NULL);
+ }
+
+#if MALI_POWER_MGMT_TEST_SUITE
+ else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_REGISTERED_CORES],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_REGISTERED_CORES])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Device get number of registerd cores\n" ));
+ pwr_mgmt_status_reg = _mali_pmm_cores_list();
+ return count;
+ }
+ else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_TIMEOUT_EVENT],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_TIMEOUT_EVENT])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI timeout event recording is enabled\n" ));
+ mali_timeout_event_recording_on = 1;
+ }
+ else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Job scheduling events recording is enabled\n" ));
+ mali_job_scheduling_events_recording_on = 1;
+ }
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+ else if (!strncmp(buf,mali_states[_MALI_DEVICE_RESUME],strlen(mali_states[_MALI_DEVICE_RESUME])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Resume Power operation is scheduled\n" ));
+ err = mali_pm_resume(NULL);
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ else if (!strncmp(buf,mali_states[_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB],strlen(mali_states[_MALI_DEVICE_EARLYSUSPEND_DISABLE_FB])))
+ {
+ mali_dev.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Android early suspend operation is scheduled\n" ));
+ mali_pm_early_suspend(&mali_dev);
+ }
+ else if (!strncmp(buf,mali_states[_MALI_DEVICE_LATERESUME],strlen(mali_states[_MALI_DEVICE_LATERESUME])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Resume Power operation is scheduled\n" ));
+ mali_pm_late_resume(NULL);
+ }
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ else if (!strncmp(buf,mali_states[_MALI_DVFS_PAUSE_EVENT],strlen(mali_states[_MALI_DVFS_PAUSE_EVENT])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI DVFS Pause Power operation is scheduled\n" ));
+ err = mali_dev_pause();
+#if MALI_POWER_MGMT_TEST_SUITE
+ test_flag_dvfs = 1;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ }
+ else if (!strncmp(buf,mali_states[_MALI_DVFS_RESUME_EVENT],strlen(mali_states[_MALI_DVFS_RESUME_EVENT])))
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI DVFS Resume Power operation is scheduled\n" ));
+ err = mali_dev_resume();
+#if MALI_POWER_MGMT_TEST_SUITE
+ test_flag_dvfs = 1;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Invalid Power Mode Operation selected\n" ));
+ }
+#if MALI_POWER_MGMT_TEST_SUITE
+ if (test_flag_dvfs == 1)
+ {
+ if (err)
+ {
+ pwr_mgmt_status_reg = 2;
+ }
+ else
+ {
+ pwr_mgmt_status_reg = 1;
+ }
+ }
+ else
+ {
+ if (1 == is_mali_pmu_present)
+ {
+ pwr_mgmt_status_reg = pmu_get_power_up_down_info();
+ }
+ }
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ return count;
+}
+
+/* Device attribute file */
+static DEVICE_ATTR(file, 0644, show_file, store_file);
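+
+/*
+ * Example (illustrative): with CONFIG_PM_DEBUG set, the attribute can be
+ * used to simulate power transitions from user space, e.g.
+ *
+ *   cat /sys/devices/platform/mali_dev.0/file      # list operations
+ *   echo suspend > /sys/devices/platform/mali_dev.0/file
+ *   echo resume  > /sys/devices/platform/mali_dev.0/file
+ *
+ * The exact sysfs path depends on the kernel version and the platform
+ * device id.
+ */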
+#endif /* CONFIG_PM_DEBUG */
+
+static int mali_pm_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM_DEBUG
+ device_remove_file(&mali_gpu_device.dev, &dev_attr_file);
+#endif /* CONFIG_PM_DEBUG */
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ pm_runtime_disable(&pdev->dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+ return 0;
+}
+
+/** This function is called when the device is probed */
+static int mali_pm_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM_DEBUG
+ int err;
+ err = device_create_file(&mali_gpu_device.dev, &dev_attr_file);
+ if (err)
+ {
+ MALI_DEBUG_PRINT(4, ("PMMDEBUG: Error in creating device file\n" ));
+ }
+#endif /* CONFIG_PM_DEBUG */
+ return 0;
+}
+
+/** This function is called when the Mali GPU device is initialized.
+ */
+int _mali_dev_platform_register(void)
+{
+ int err;
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ set_mali_parent_power_domain(&mali_gpu_device);
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ err = register_pm_notifier(&mali_pwr_notif_block);
+ if (err)
+ {
+ return err;
+ }
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_PM_RUNTIME */
+ err = platform_device_register(&mali_gpu_device);
+ lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)( _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 0);
+ if (!err)
+ {
+ err = platform_driver_register(&mali_plat_driver);
+ if (!err)
+ {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ register_early_suspend(&mali_dev_early_suspend);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ }
+ else
+ {
+ _mali_osk_lock_term(lock);
+#ifdef CONFIG_PM_RUNTIME
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ unregister_pm_notifier(&mali_pwr_notif_block);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_PM_RUNTIME */
+ platform_device_unregister(&mali_gpu_device);
+ }
+ }
+ return err;
+}
+
+/** This function is called when the Mali GPU driver is unloaded.
+ */
+void _mali_dev_platform_unregister(void)
+{
+ _mali_osk_lock_term(lock);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&mali_dev_early_suspend);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+#ifdef CONFIG_PM_RUNTIME
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ unregister_pm_notifier(&mali_pwr_notif_block);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_PM_RUNTIME */
+
+ platform_driver_unregister(&mali_plat_driver);
+ platform_device_unregister(&mali_gpu_device);
+}
+
+#endif /* MALI_LICENSE_IS_GPL */
+#endif /* CONFIG_PM */
+
+#if MALI_STATE_TRACKING
+void mali_pmm_dump_os_thread_state( void )
+{
+ MALI_PRINTF(("\nOSPMM::The OS PMM thread state is...%d",is_os_pmm_thread_waiting));
+}
+#endif /* MALI_STATE_TRACKING */
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h
new file mode 100644
index 00000000000..6e879fea447
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_kernel_pm.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PM_H__
+#define __MALI_KERNEL_PM_H__
+
+#ifdef USING_MALI_PMM
+int _mali_dev_platform_register(void);
+void _mali_dev_platform_unregister(void);
+#endif /* USING_MALI_PMM */
+
+#endif /* __MALI_KERNEL_PM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c
new file mode 100644
index 00000000000..3d60d18e005
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_dvfs_pause_resume.c
@@ -0,0 +1,72 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_linux_dvfs_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+#if USING_MALI_PMM
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+#include "mali_linux_pm.h"
+#include "mali_linux_dvfs_pause_resume.h"
+#include "mali_pmm.h"
+#include "mali_kernel_license.h"
+#ifdef CONFIG_PM
+#if MALI_LICENSE_IS_GPL
+
+/* Mali Pause Resume APIs */
+int mali_dev_dvfs_pause(void)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_SUSPEND) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND))
+ {
+ err = -EPERM;
+ }
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) && (!err))
+ {
+ mali_device_suspend(MALI_PMM_EVENT_DVFS_PAUSE, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_SUSPEND;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_dvfs_pause);
+
+int mali_dev_dvfs_resume(void)
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) || (mali_device_state == _MALI_DEVICE_SUSPEND_IN_PROGRESS)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND))
+ {
+ err = -EPERM;
+ }
+ if (!err)
+ {
+ mali_device_resume(MALI_PMM_EVENT_DVFS_RESUME, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_dvfs_resume);
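+
+/*
+ * Illustrative sketch (assumed): a DVFS governor would bracket a Mali
+ * clock change with the exported pause/resume calls, e.g.
+ *
+ *   if (0 == mali_dev_dvfs_pause())
+ *   {
+ *       set_mali_clock_rate(new_rate);   (hypothetical platform helper)
+ *       mali_dev_dvfs_resume();
+ *   }
+ */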
+
+#endif /* MALI_LICENSE_IS_GPL */
+#endif /* CONFIG_PM */
+#endif /* USING_MALI_PMM */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h
new file mode 100644
index 00000000000..614d5e1deee
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_LINUX_PM_H__
+#define __MALI_LINUX_PM_H__
+
+#if USING_MALI_PMM
+
+#ifdef CONFIG_PM
+/* Number of power states supported for making power up and down */
+typedef enum
+{
+ _MALI_DEVICE_SUSPEND, /* Suspend */
+ _MALI_DEVICE_RESUME, /* Resume */
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ _MALI_DEVICE_EARLYSUSPEND_DISABLE_FB, /* Early suspend */
+ _MALI_DEVICE_LATERESUME, /* Late resume */
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ _MALI_DEVICE_SUSPEND_IN_PROGRESS, /* Suspend in progress */
+ _MALI_DEVICE_MAX_POWER_STATES, /* Maximum power states */
+} _mali_device_power_states;
+
+/* Number of DVFS events */
+typedef enum
+{
+ _MALI_DVFS_PAUSE_EVENT = _MALI_DEVICE_MAX_POWER_STATES-1, /* DVFS Pause event */
+ _MALI_DVFS_RESUME_EVENT, /* DVFS Resume event */
+ _MALI_MAX_DEBUG_OPERATIONS,
+} _mali_device_dvfs_events;
+
+extern _mali_device_power_states mali_device_state;
+extern _mali_device_power_states mali_dvfs_device_state;
+extern _mali_osk_lock_t *lock;
+extern short is_wake_up_needed;
+extern int timeout_fired;
+extern struct platform_device mali_gpu_device;
+
+/* dvfs pm thread */
+extern struct task_struct *dvfs_pm_thread;
+
+/* Power management thread */
+extern struct task_struct *pm_thread;
+
+int mali_device_suspend(u32 event_id, struct task_struct **pwr_mgmt_thread);
+int mali_device_resume(u32 event_id, struct task_struct **pwr_mgmt_thread);
+
+#endif /* CONFIG_PM */
+#endif /* USING_MALI_PMM */
+#endif /* __MALI_LINUX_PM_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h
new file mode 100644
index 00000000000..c80b0b0a5e3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_linux_pm_testsuite.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __MALI_LINUX_PM_TESTSUITE_H__
+#define __MALI_LINUX_PM_TESTSUITE_H__
+
+#if USING_MALI_PMM
+#if MALI_POWER_MGMT_TEST_SUITE
+#ifdef CONFIG_PM
+
+typedef enum
+{
+ _MALI_DEVICE_PMM_TIMEOUT_EVENT,
+ _MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS,
+ _MALI_DEVICE_PMM_REGISTERED_CORES,
+ _MALI_DEVICE_MAX_PMM_EVENTS
+
+} _mali_device_pmm_recording_events;
+
+extern unsigned int mali_timeout_event_recording_on;
+extern unsigned int mali_job_scheduling_events_recording_on;
+extern unsigned int pwr_mgmt_status_reg;
+extern unsigned int is_mali_pmm_testsuite_enabled;
+extern unsigned int is_mali_pmu_present;
+
+#endif /* CONFIG_PM */
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+#endif /* USING_MALI_PMM */
+#endif /* __MALI_LINUX_PM_TESTSUITE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c
new file mode 100644
index 00000000000..cc029c4a8f8
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_atomics.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom )
+{
+ atomic_dec((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom )
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom )
+{
+ atomic_inc((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom )
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val )
+{
+ MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+ atomic_set((atomic_t *)&atom->u.val, val);
+ return _MALI_OSK_ERR_OK;
+}
+
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_read((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom )
+{
+ MALI_IGNORE(atom);
+}
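+
+/*
+ * Example (illustrative, error handling omitted) of the reference-count
+ * style usage this API supports; release_object() is a hypothetical
+ * last-user cleanup:
+ *
+ *   _mali_osk_atomic_t refs;
+ *   _mali_osk_atomic_init(&refs, 1);                 (initial reference)
+ *   _mali_osk_atomic_inc(&refs);                     (extra user)
+ *   _mali_osk_atomic_dec(&refs);                     (extra user done)
+ *   if (0 == _mali_osk_atomic_dec_return(&refs))     (last reference gone)
+ *   {
+ *       _mali_osk_atomic_term(&refs);
+ *       release_object();
+ *   }
+ */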
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c
new file mode 100644
index 00000000000..b60bf825897
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * @file mali_osk_indir_mmap.c
+ * Implementation of per-OS Kernel level specifics
+ */
+
+_mali_osk_errcode_t _mali_osk_specific_indirect_mmap( _mali_uk_mem_mmap_s *args )
+{
+ /* args->ctx ignored here; args->ukk_private required instead */
+ /* we need to lock the mmap semaphore before calling the do_mmap function */
+ down_write(&current->mm->mmap_sem);
+
+ args->mapping = (void __user *)do_mmap(
+ (struct file *)args->ukk_private,
+ 0, /* start mapping from any address after NULL */
+ args->size,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ args->phys_addr
+ );
+
+ /* and unlock it after the call */
+ up_write(&current->mm->mmap_sem);
+
+ /* No cookie required here */
+ args->cookie = 0;
+ /* uku_private meaningless, so zero */
+ args->uku_private = NULL;
+
+ if ( (NULL == args->mapping) || IS_ERR((void *)args->mapping) )
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Success */
+ return _MALI_OSK_ERR_OK;
+}
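+
+/*
+ * Note (illustrative, assuming a newer kernel): do_mmap() with this
+ * signature is only available on older kernels; from around Linux 3.4 the
+ * equivalent call is vm_mmap(), which takes the same arguments and handles
+ * the mmap semaphore internally:
+ *
+ *   args->mapping = (void __user *)vm_mmap(
+ *       (struct file *)args->ukk_private, 0, args->size,
+ *       PROT_READ | PROT_WRITE, MAP_SHARED, args->phys_addr);
+ */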
+
+
+_mali_osk_errcode_t _mali_osk_specific_indirect_munmap( _mali_uk_mem_munmap_s *args )
+{
+ /* args->ctx and args->cookie ignored here */
+
+ if ((NULL != current) && (NULL != current->mm))
+ {
+ /* remove mapping of mali memory from the process' view */
+ /* lock mmap_sem before calling do_munmap */
+ down_write(&current->mm->mmap_sem);
+ do_munmap(
+ current->mm,
+ (unsigned long)args->mapping,
+ args->size
+ );
+ /* and unlock after call */
+ up_write(&current->mm->mmap_sem);
+ MALI_DEBUG_PRINT(5, ("unmapped\n"));
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(2, ("Freeing of a big block while no user process attached, assuming crash cleanup in progress\n"));
+ }
+
+ return _MALI_OSK_ERR_OK; /* always succeeds */
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h
new file mode 100644
index 00000000000..f87739bb8f9
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_indir_mmap.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_indir_mmap.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_INDIR_MMAP_H__
+#define __MALI_OSK_INDIR_MMAP_H__
+
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Linux specific means for calling _mali_ukk_mem_mmap/munmap
+ *
+ * The presence of _MALI_OSK_SPECIFIC_INDIRECT_MMAP indicates that
+ * _mali_osk_specific_indirect_mmap and _mali_osk_specific_indirect_munmap
+ * should be used instead of _mali_ukk_mem_mmap/_mali_ukk_mem_munmap.
+ *
+ * The arguments are the same as _mali_ukk_mem_mmap/_mali_ukk_mem_munmap.
+ *
+ * On all operating systems other than Linux, common code is expected to be
+ * able to call _mali_ukk_mem_mmap/_mali_ukk_mem_munmap directly.
+ * Such systems should NOT define _MALI_OSK_SPECIFIC_INDIRECT_MMAP.
+ */
+_mali_osk_errcode_t _mali_osk_specific_indirect_mmap( _mali_uk_mem_mmap_s *args );
+_mali_osk_errcode_t _mali_osk_specific_indirect_munmap( _mali_uk_mem_munmap_s *args );
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_INDIR_MMAP_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c
new file mode 100644
index 00000000000..1a4bbcecdce
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_irq.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/workqueue.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "linux/interrupt.h"
+
+typedef struct _mali_osk_irq_t_struct
+{
+ u32 irqnum;
+ void *data;
+ _mali_osk_irq_uhandler_t uhandler;
+ _mali_osk_irq_bhandler_t bhandler;
+ struct work_struct work_queue_irq_handle; /* Workqueue for the bottom half of the IRQ-handling. This job is activated when this core gets an IRQ.*/
+} mali_osk_irq_object_t;
+
+#if MALI_LICENSE_IS_GPL
+static struct workqueue_struct *pmm_wq = NULL;
+#endif
+
+typedef void (*workqueue_func_t)(void *);
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ); /* , struct pt_regs *regs*/
+
+#if defined(INIT_DELAYED_WORK)
+static void irq_handler_bottom_half ( struct work_struct *work );
+#else
+static void irq_handler_bottom_half ( void * input );
+#endif
+
+/**
+ * Newer Linux kernels have deprecated SA_SHIRQ in favour of IRQF_SHARED.
+ * Define IRQF_SHARED here for older kernels that predate the rename.
+ */
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif /* IRQF_SHARED */
+
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, _mali_osk_irq_bhandler_t bhandler, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *data, const char *description )
+{
+ mali_osk_irq_object_t *irq_object;
+
+ irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+ if (NULL == irq_object) return NULL;
+
+ /* workqueue API changed in 2.6.20, support both versions: */
+#if defined(INIT_DELAYED_WORK)
+ /* New syntax: INIT_WORK( struct work_struct *work, void (*function)(struct work_struct *)) */
+ INIT_WORK( &irq_object->work_queue_irq_handle, irq_handler_bottom_half);
+#else
+ /* Old syntax: INIT_WORK( struct work_struct *work, void (*function)(void *), void *data) */
+ INIT_WORK( &irq_object->work_queue_irq_handle, irq_handler_bottom_half, irq_object);
+#endif /* defined(INIT_DELAYED_WORK) */
+
+ if (-1 == irqnum)
+ {
+ /* Probe for IRQ */
+ if ( (NULL != trigger_func) && (NULL != ack_func) )
+ {
+ unsigned long probe_count = 3;
+ _mali_osk_errcode_t err;
+ int irq;
+
+ MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+ do
+ {
+ unsigned long mask;
+
+ mask = probe_irq_on();
+ trigger_func(data);
+
+ _mali_osk_time_ubusydelay(5);
+
+ irq = probe_irq_off(mask);
+ err = ack_func(data);
+ }
+ while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+ if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+ else irqnum = irq;
+ }
+ else irqnum = -1; /* no probe functions, fault */
+
+ if (-1 != irqnum)
+ {
+ /* found an irq */
+ MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+ }
+ }
+
+ irq_object->irqnum = irqnum;
+ irq_object->uhandler = uhandler;
+ irq_object->bhandler = bhandler;
+ irq_object->data = data;
+
+ /* Is this a real IRQ handler we need? */
+ if (!mali_benchmark && irqnum != _MALI_OSK_IRQ_NUMBER_FAKE && irqnum != _MALI_OSK_IRQ_NUMBER_PMM)
+ {
+ if (-1 == irqnum)
+ {
+ MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+
+ if (0 != request_irq(irqnum, irq_handler_upper_half, IRQF_SHARED, description, irq_object))
+ {
+ MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+ }
+
+#if MALI_LICENSE_IS_GPL
+ if ( _MALI_OSK_IRQ_NUMBER_PMM == irqnum )
+ {
+ pmm_wq = create_singlethread_workqueue("mali-pmm-wq");
+ }
+#endif
+
+ return irq_object;
+}
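+
+/*
+ * Illustrative caller sketch (assumed, the core_* names are hypothetical):
+ * a core registers its handlers through this call; passing irqnum == -1
+ * together with trigger/ack callbacks makes the code above probe for the
+ * IRQ line instead:
+ *
+ *   core->irq = _mali_osk_irq_init(core->irq_number,
+ *                                  core_uhandler, core_bhandler,
+ *                                  NULL, NULL, core, "mali_gp");
+ *   if (NULL == core->irq) return _MALI_OSK_ERR_FAULT;
+ */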
+
+void _mali_osk_irq_schedulework( _mali_osk_irq_t *irq )
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+#if MALI_LICENSE_IS_GPL
+ if ( irq_object->irqnum == _MALI_OSK_IRQ_NUMBER_PMM )
+ {
+ queue_work(pmm_wq,&irq_object->work_queue_irq_handle);
+ }
+ else
+ {
+#endif
+ schedule_work(&irq_object->work_queue_irq_handle);
+#if MALI_LICENSE_IS_GPL
+ }
+#endif
+}
+
+void _mali_osk_irq_term( _mali_osk_irq_t *irq )
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+
+#if MALI_LICENSE_IS_GPL
+ if(irq_object->irqnum == _MALI_OSK_IRQ_NUMBER_PMM )
+ {
+ flush_workqueue(pmm_wq);
+ destroy_workqueue(pmm_wq);
+ }
+#endif
+ if (!mali_benchmark)
+ {
+ free_irq(irq_object->irqnum, irq_object);
+ }
+ /* Flush any pending bottom half before freeing the object it references */
+ flush_scheduled_work();
+ kfree(irq_object);
+}
+
+
+/** This function is called directly in interrupt context from the OS just after
+ * the CPU gets the hw-irq from mali, or from other devices on the same IRQ channel.
+ * One of these handlers is registered for each mali core, so when an interrupt
+ * arrives this function is called once per registered core, and each call
+ * checks only the single core identified by the \a dev_id argument.
+ * If a pending interrupt is detected on the given core, the interrupt is masked
+ * out by setting the core's IRQ_MASK register to zero, and
+ * mali_core_irq_handler_bottom_half is scheduled to run as a high-priority
+ * work queue job.
+ */
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ) /* , struct pt_regs *regs*/
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+ if (irq_object->uhandler(irq_object->data) == _MALI_OSK_ERR_OK)
+ {
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/* Is executed when an interrupt occurs on one core */
+/* workqueue API changed in 2.6.20, support both versions: */
+#if defined(INIT_DELAYED_WORK)
+static void irq_handler_bottom_half ( struct work_struct *work )
+#else
+static void irq_handler_bottom_half ( void * input )
+#endif
+{
+ mali_osk_irq_object_t *irq_object;
+
+#if defined(INIT_DELAYED_WORK)
+ irq_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_irq_object_t, work_queue_irq_handle);
+#else
+ if ( NULL == input )
+ {
+ MALI_PRINT_ERROR(("IRQ: Null pointer! Illegal!"));
+ return; /* Error */
+ }
+ irq_object = (mali_osk_irq_object_t *) input;
+#endif
+
+ irq_object->bhandler(irq_object->data);
+}
+
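To make the upper-/bottom-half split above concrete, here is a minimal sketch of a core driver registering with this OSK IRQ layer. The my_core structure and its register helpers (my_core_irq_pending, my_core_mask_irqs) are hypothetical; only the _mali_osk_irq_* calls are taken from the code above.

    struct my_core { _mali_osk_irq_t *irq; /* ... registers, state ... */ };

    /* Upper half: runs in interrupt context; only check the core and defer. */
    static _mali_osk_errcode_t my_core_uhandler( void *data )
    {
        struct my_core *core = (struct my_core *)data;

        if ( !my_core_irq_pending(core) ) return _MALI_OSK_ERR_FAULT; /* -> IRQ_NONE */

        my_core_mask_irqs(core);                /* silence the core until the bottom half runs */
        _mali_osk_irq_schedulework(core->irq);  /* queue the bottom half */
        return _MALI_OSK_ERR_OK;                /* -> IRQ_HANDLED */
    }

    /* Bottom half: runs from the work queue in process context, may sleep. */
    static void my_core_bhandler( void *data )
    {
        struct my_core *core = (struct my_core *)data;
        /* ... process the event, then unmask the core's interrupts ... */
        (void)core;
    }

    static _mali_osk_errcode_t my_core_setup( struct my_core *core, u32 irqnum )
    {
        /* NULL trigger/ack: no probing, so irqnum must already be valid */
        core->irq = _mali_osk_irq_init( irqnum, my_core_uhandler, my_core_bhandler,
                                        NULL, NULL, core, "my_core" );
        return (NULL == core->irq) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
    }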
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c
new file mode 100644
index 00000000000..11285efc682
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_locks.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* These are all the locks we implement: */
+typedef enum
+{
+ _MALI_OSK_INTERNAL_LOCKTYPE_SPIN, /* Mutex, implicitly non-interruptible, use spin_lock/spin_unlock */
+ _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ, /* Mutex, IRQ version of spinlock, use spin_lock_irqsave/spin_unlock_irqrestore */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX, /* Interruptible, use up()/down_interruptible() */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT, /* Non-interruptible, use up()/down() */
+ _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW, /* Non-interruptible, Reader/Writer, use {up,down}_{read,write}() */
+
+ /* Linux supports, but we do not support:
+ * Non-interruptible Reader/Writer spinlock mutexes - RW optimization will be switched off
+ */
+
+ /* Linux does not support:
+ * One-locks, of any sort - no optimization for this fact will be made.
+ */
+
+} _mali_osk_internal_locktype;
+
+struct _mali_osk_lock_t_struct
+{
+ _mali_osk_internal_locktype type;
+ unsigned long flags;
+ union
+ {
+ spinlock_t spinlock;
+ struct semaphore sema;
+ struct rw_semaphore rw_sema;
+ } obj;
+ MALI_DEBUG_CODE(
+ /** original flags for debug checking */
+ _mali_osk_lock_flags_t orig_flags;
+ _mali_osk_lock_mode_t locked_as;
+ ); /* MALI_DEBUG_CODE */
+};
+
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order )
+{
+ _mali_osk_lock_t *lock = NULL;
+
+ /* Validate parameters: */
+ /* Flags acceptable */
+ MALI_DEBUG_ASSERT( 0 == ( flags & ~(_MALI_OSK_LOCKFLAG_SPINLOCK
+ | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ
+ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE
+ | _MALI_OSK_LOCKFLAG_READERWRITER
+ | _MALI_OSK_LOCKFLAG_ORDERED
+ | _MALI_OSK_LOCKFLAG_ONELOCK )) );
+ /* Spinlocks are always non-interruptible */
+ MALI_DEBUG_ASSERT( (((flags & _MALI_OSK_LOCKFLAG_SPINLOCK) || (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ)) && (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE))
+ || !(flags & _MALI_OSK_LOCKFLAG_SPINLOCK));
+ /* Parameter initial SBZ - for future expansion */
+ MALI_DEBUG_ASSERT( 0 == initial );
+
+ lock = kmalloc(sizeof(_mali_osk_lock_t), GFP_KERNEL);
+
+ if ( NULL == lock )
+ {
+ return lock;
+ }
+
+ /* Determine type of mutex: */
+ /* defaults to an interruptible mutex if no flags are specified */
+
+ if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK) )
+ {
+ /* Non-interruptible spinlocks override all others */
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN;
+ spin_lock_init( &lock->obj.spinlock );
+ }
+ else if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ ) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ;
+ lock->flags = 0;
+ spin_lock_init( &lock->obj.spinlock );
+ }
+ else if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE)
+ && (flags & _MALI_OSK_LOCKFLAG_READERWRITER) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW;
+ init_rwsem( &lock->obj.rw_sema );
+ }
+ else
+ {
+ /* Usual mutex types */
+ if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE) )
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT;
+ }
+ else
+ {
+ lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX;
+ }
+
+ /* Initially unlocked */
+ sema_init( &lock->obj.sema, 1 );
+ }
+
+ MALI_DEBUG_CODE(
+ /* Debug tracking of flags */
+ lock->orig_flags = flags;
+ lock->locked_as = _MALI_OSK_LOCKMODE_UNDEF;
+ ); /* MALI_DEBUG_CODE */
+
+ return lock;
+}
+
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || _MALI_OSK_LOCKMODE_RO == mode );
+
+ /* Only allow RO locks when the initial object was a Reader/Writer lock.
+ * Since this information is lost in the internal locktype, we use the original
+ * flags, which are only stored when built for DEBUG */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+ switch ( lock->type )
+ {
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+ spin_lock(&lock->obj.spinlock);
+ break;
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+ spin_lock_irqsave(&lock->obj.spinlock, lock->flags);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+ if ( down_interruptible(&lock->obj.sema) )
+ {
+ err = _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+ down(&lock->obj.sema);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+ if (mode == _MALI_OSK_LOCKMODE_RO)
+ {
+ down_read(&lock->obj.rw_sema);
+ }
+ else
+ {
+ down_write(&lock->obj.rw_sema);
+ }
+ break;
+
+ default:
+ /* Reaching here indicates a programming error, so you will not get here
+ * on non-DEBUG builds */
+ MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+ break;
+ }
+
+ /* DEBUG tracking of previously locked state - occurs after lock obtained */
+ MALI_DEBUG_CODE(
+ if ( _MALI_OSK_ERR_OK == err )
+ {
+ /* Assert that this is not currently locked */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_UNDEF == lock->locked_as );
+
+ lock->locked_as = mode;
+ }
+ ); /* MALI_DEBUG_CODE */
+
+ return err;
+}
+
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode )
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || _MALI_OSK_LOCKMODE_RO == mode );
+
+ /* Only allow RO locks when the initial object was a Reader/Writer lock.
+ * Since this information is lost in the internal locktype, we use the original
+ * flags, which are only stored when built for DEBUG */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+ || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+ /* For DEBUG only, assert that we previously locked this, and in the same way (RW/RO) */
+ MALI_DEBUG_ASSERT( mode == lock->locked_as );
+
+ /* DEBUG tracking of previously locked state - occurs before lock released */
+ MALI_DEBUG_CODE( lock->locked_as = _MALI_OSK_LOCKMODE_UNDEF );
+
+ switch ( lock->type )
+ {
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+ spin_unlock(&lock->obj.spinlock);
+ break;
+ case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+ spin_unlock_irqrestore(&lock->obj.spinlock, lock->flags);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+ /* FALLTHROUGH */
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+ up(&lock->obj.sema);
+ break;
+
+ case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+ if (mode == _MALI_OSK_LOCKMODE_RO)
+ {
+ up_read(&lock->obj.rw_sema);
+ }
+ else
+ {
+ up_write(&lock->obj.rw_sema);
+ }
+ break;
+
+ default:
+ /* Reaching here indicates a programming error, so you will not get here
+ * on non-DEBUG builds */
+ MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+ break;
+ }
+}
+
+void _mali_osk_lock_term( _mali_osk_lock_t *lock )
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER( lock );
+
+ /* For DEBUG only, assert that this is not currently locked */
+ MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_UNDEF == lock->locked_as );
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+}
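As a usage sketch of the API above: getting the rw_semaphore path requires both _MALI_OSK_LOCKFLAG_READERWRITER and _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, otherwise init falls through to a plain mutex. The counter being protected here is hypothetical; the calls are from this file.

    static _mali_osk_lock_t *counter_lock; /* hypothetical shared state */
    static u32 counter;

    static _mali_osk_errcode_t counter_demo( void )
    {
        /* Both flags are needed: READERWRITER alone would silently become a plain mutex. */
        counter_lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE
                                          | _MALI_OSK_LOCKFLAG_READERWRITER, 0, 0 );
        if ( NULL == counter_lock ) return _MALI_OSK_ERR_NOMEM;

        /* Writer side: maps to down_write()/up_write() on the embedded rw_semaphore. */
        _mali_osk_lock_wait( counter_lock, _MALI_OSK_LOCKMODE_RW );
        counter++;
        _mali_osk_lock_signal( counter_lock, _MALI_OSK_LOCKMODE_RW );

        /* Reader side: RO mode is only legal because the lock was created READERWRITER. */
        _mali_osk_lock_wait( counter_lock, _MALI_OSK_LOCKMODE_RO );
        MALI_DEBUG_PRINT(4, ("counter is %u\n", counter));
        _mali_osk_lock_signal( counter_lock, _MALI_OSK_LOCKMODE_RO );

        _mali_osk_lock_term( counter_lock );
        return _MALI_OSK_ERR_OK;
    }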
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c
new file mode 100644
index 00000000000..6454709caaf
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_low_level_mem.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "mali_osk.h"
+#include "mali_ukk.h" /* required to hook in _mali_ukk_mem_mmap handling */
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma);
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+
+typedef struct mali_vma_usage_tracker
+{
+ int references;
+ u32 cookie;
+} mali_vma_usage_tracker;
+
+
+/* Linked list structure to hold details of all OS allocations in a particular
+ * mapping
+ */
+struct AllocationList
+{
+ struct AllocationList *next;
+ u32 offset;
+ u32 physaddr;
+};
+
+typedef struct AllocationList AllocationList;
+
+/* Private structure to store details of a mapping region returned
+ * from _mali_osk_mem_mapregion_init
+ */
+struct MappingInfo
+{
+ struct vm_area_struct *vma;
+ struct AllocationList *list;
+};
+
+typedef struct MappingInfo MappingInfo;
+
+
+static u32 _kernel_page_allocate(void);
+static void _kernel_page_release(u32 physical_address);
+static AllocationList * _allocation_list_item_get(void);
+static void _allocation_list_item_release(AllocationList * item);
+
+
+/* Variable declarations */
+spinlock_t allocation_list_spinlock;
+static AllocationList * pre_allocated_memory = (AllocationList*) NULL ;
+static int pre_allocated_memory_size_current = 0;
+#ifdef MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB
+ static int pre_allocated_memory_size_max = MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 1024 * 1024;
+#else
+ static int pre_allocated_memory_size_max = 6 * 1024 * 1024; /* 6 MiB */
+#endif
+
+static struct vm_operations_struct mali_kernel_vm_ops =
+{
+ .open = mali_kernel_memory_vma_open,
+ .close = mali_kernel_memory_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ .fault = mali_kernel_memory_cpu_page_fault_handler
+#else
+ .nopfn = mali_kernel_memory_cpu_page_fault_handler
+#endif
+};
+
+
+void mali_osk_low_level_mem_init(void)
+{
+ spin_lock_init( &allocation_list_spinlock );
+ pre_allocated_memory = (AllocationList*) NULL ;
+}
+
+void mali_osk_low_level_mem_term(void)
+{
+ while ( NULL != pre_allocated_memory )
+ {
+ AllocationList *item;
+ item = pre_allocated_memory;
+ pre_allocated_memory = item->next;
+ _kernel_page_release(item->physaddr);
+ _mali_osk_free( item );
+ }
+ pre_allocated_memory_size_current = 0;
+}
+
+static u32 _kernel_page_allocate(void)
+{
+ struct page *new_page;
+ u32 linux_phys_addr;
+
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+
+ if ( NULL == new_page )
+ {
+ return 0;
+ }
+
+ /* Ensure page is flushed from CPU caches. */
+ linux_phys_addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ return linux_phys_addr;
+}
+
+static void _kernel_page_release(u32 physical_address)
+{
+ struct page *unmap_page;
+
+ dma_unmap_page(NULL, physical_address, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ unmap_page = pfn_to_page( physical_address >> PAGE_SHIFT );
+ MALI_DEBUG_ASSERT_POINTER( unmap_page );
+ __free_page( unmap_page );
+}
+
+static AllocationList * _allocation_list_item_get(void)
+{
+ AllocationList *item = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&allocation_list_spinlock,flags);
+ if ( pre_allocated_memory )
+ {
+ item = pre_allocated_memory;
+ pre_allocated_memory = pre_allocated_memory->next;
+ pre_allocated_memory_size_current -= PAGE_SIZE;
+
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+ return item;
+ }
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+ item = _mali_osk_malloc( sizeof(AllocationList) );
+ if ( NULL == item)
+ {
+ return NULL;
+ }
+
+ item->physaddr = _kernel_page_allocate();
+ if ( 0 == item->physaddr )
+ {
+ /* Non-fatal error condition, out of memory. Upper levels will handle this. */
+ _mali_osk_free( item );
+ return NULL;
+ }
+ return item;
+}
+
+static void _allocation_list_item_release(AllocationList * item)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&allocation_list_spinlock,flags);
+ if ( pre_allocated_memory_size_current < pre_allocated_memory_size_max)
+ {
+ item->next = pre_allocated_memory;
+ pre_allocated_memory = item;
+ pre_allocated_memory_size_current += PAGE_SIZE;
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+ return;
+ }
+ spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+ _kernel_page_release(item->physaddr);
+ _mali_osk_free( item );
+}
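The two helpers above form a bounded free list: released pages are parked on pre_allocated_memory for reuse until pre_allocated_memory_size_max is reached, and only then really freed. Stripped of the Mali types, the pattern looks like this (all names here are hypothetical; list_lock is assumed to be initialised with spin_lock_init() at startup):

    struct item { struct item *next; };
    static struct item *freelist;          /* protected by list_lock */
    static int cached, cache_max = 64;
    static spinlock_t list_lock;

    static struct item *item_get(void)
    {
        unsigned long flags;
        struct item *it = NULL;

        spin_lock_irqsave(&list_lock, flags);
        if (freelist) {                    /* fast path: recycle a cached item */
            it = freelist;
            freelist = it->next;
            cached--;
        }
        spin_unlock_irqrestore(&list_lock, flags);

        return it ? it : kmalloc(sizeof(*it), GFP_KERNEL); /* slow path */
    }

    static void item_put(struct item *it)
    {
        unsigned long flags;

        spin_lock_irqsave(&list_lock, flags);
        if (cached < cache_max) {          /* park it for reuse, up to the cap */
            it->next = freelist;
            freelist = it;
            cached++;
            spin_unlock_irqrestore(&list_lock, flags);
            return;
        }
        spin_unlock_irqrestore(&list_lock, flags);

        kfree(it);                         /* cache full: really free it */
    }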
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ void __user * address;
+ address = vmf->virtual_address;
+#endif
+ /*
+ * We always fail the call since all memory is pre-faulted when assigned to the process.
+ * Only the Mali cores can use page faults to extend buffers.
+ */
+
+ MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
+ MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void*)address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ return VM_FAULT_SIGBUS;
+#else
+ return NOPFN_SIGBUS;
+#endif
+}
+
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma)
+{
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+ vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+ vma_usage_tracker->references++;
+
+ return;
+}
+
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma)
+{
+ _mali_uk_mem_munmap_s args = {0, };
+ mali_memory_allocation * descriptor;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
+
+ vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+
+ BUG_ON(!vma_usage_tracker);
+ BUG_ON(0 == vma_usage_tracker->references);
+
+ vma_usage_tracker->references--;
+
+ if (0 != vma_usage_tracker->references)
+ {
+ MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", vma_usage_tracker->references));
+ return;
+ }
+
+ /** @note args->context unused, initialized to 0.
+ * Instead, we use the memory_session from the cookie */
+
+ descriptor = (mali_memory_allocation *)vma_usage_tracker->cookie;
+
+ args.cookie = (u32)descriptor;
+ args.mapping = descriptor->mapping;
+ args.size = descriptor->size;
+
+ _mali_ukk_mem_munmap( &args );
+
+ /* vma_usage_tracker is free()d by _mali_osk_mem_mapregion_term().
+ * In the case of the memory engine, it is called as the release function that has been registered with the engine */
+}
+
+
+void _mali_osk_mem_barrier( void )
+{
+ mb();
+}
+
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description )
+{
+ return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address virt )
+{
+ iounmap((void*)virt);
+}
+
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size )
+{
+ void * virt;
+ MALI_DEBUG_ASSERT_POINTER( phys );
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+ MALI_DEBUG_ASSERT( 0 != size );
+
+ /* dma_alloc_* uses a limited region of address space. On most architectures
+ * 2 to 14 MiB is available. This should be enough for the page tables, which
+ * are currently the only user of this function. */
+ virt = dma_alloc_coherent(NULL, size, phys, GFP_KERNEL | GFP_DMA );
+
+ MALI_DEBUG_PRINT(3, ("Page table virt: 0x%x = dma_alloc_coherent(size:%d, phys:0x%x, )\n", virt, size, phys));
+
+ if ( NULL == virt )
+ {
+ MALI_DEBUG_PRINT(1, ("allocioregion: Failed to allocate Pagetable memory, size=0x%.8X\n", size ));
+ MALI_DEBUG_PRINT(1, ("Solution: When configuring and building linux kernel, set CONSISTENT_DMA_SIZE to be 14 MB.\n"));
+ return 0;
+ }
+
+ MALI_DEBUG_ASSERT( 0 == (*phys & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ return (mali_io_address)virt;
+}
+
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address virt )
+{
+ MALI_DEBUG_ASSERT_POINTER( (void*)virt );
+ MALI_DEBUG_ASSERT( 0 != size );
+ MALI_DEBUG_ASSERT( 0 == (phys & ( (1 << PAGE_SHIFT) - 1 )) );
+
+ dma_free_coherent(NULL, size, virt, phys);
+}
+
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description )
+{
+ return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+}
+
+void inline _mali_osk_mem_unreqregion( u32 phys, u32 size )
+{
+ release_mem_region(phys, size);
+}
+
+u32 inline _mali_osk_mem_ioread32( volatile mali_io_address addr, u32 offset )
+{
+ return ioread32(((u8*)addr) + offset);
+}
+
+void inline _mali_osk_mem_iowrite32( volatile mali_io_address addr, u32 offset, u32 val )
+{
+ iowrite32(val, ((u8*)addr) + offset);
+}
+
+void _mali_osk_cache_flushall( void )
+{
+ /** @note Cached memory is not currently supported in this implementation */
+}
+
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size )
+{
+ wmb();
+}
+
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor )
+{
+ struct vm_area_struct *vma;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ vma = (struct vm_area_struct*)descriptor->process_addr_mapping_info;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ /* Re-write the process_addr_mapping_info */
+ mappingInfo = _mali_osk_calloc( 1, sizeof(MappingInfo) );
+
+ if ( NULL == mappingInfo ) return _MALI_OSK_ERR_FAULT;
+
+ vma_usage_tracker = _mali_osk_calloc( 1, sizeof(mali_vma_usage_tracker) );
+
+ if (NULL == vma_usage_tracker)
+ {
+ MALI_DEBUG_PRINT(2, ("Failed to allocate memory to track memory usage\n"));
+ _mali_osk_free( mappingInfo );
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mappingInfo->vma = vma;
+ descriptor->process_addr_mapping_info = mappingInfo;
+
+ /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+ descriptor->mapping = (void __user*)vma->vm_start;
+ /* list member is already NULL */
+
+ /*
+ set some bits which indicate that:
+ The memory is IO memory, meaning that no paging is to be performed and the memory should not be included in crash dumps
+ The memory is reserved, meaning that it's present and can never be paged out (see also previous entry)
+ */
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTCOPY;
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
+
+ vma_usage_tracker->references = 1; /* set initial reference count to be 1 as vma_open won't be called for the first mmap call */
+ vma_usage_tracker->cookie = (u32)descriptor; /* cookie for munmap */
+
+ vma->vm_private_data = vma_usage_tracker;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor )
+{
+ struct vm_area_struct* vma;
+ mali_vma_usage_tracker * vma_usage_tracker;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ /* Linux does the right thing as part of munmap to remove the mapping
+ * All that remains is that we remove the vma_usage_tracker setup in init() */
+ vma = mappingInfo->vma;
+
+ MALI_DEBUG_ASSERT_POINTER( vma );
+
+ /* ASSERT that there are no allocations on the list. Unmap should've been
+ * called on all OS allocations. */
+ MALI_DEBUG_ASSERT( NULL == mappingInfo->list );
+
+ vma_usage_tracker = vma->vm_private_data;
+
+ /* We only get called if mem_mapregion_init succeeded */
+ _mali_osk_free(vma_usage_tracker);
+
+ _mali_osk_free( mappingInfo );
+ return;
+}
+
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size )
+{
+ struct vm_area_struct *vma;
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_ASSERT_POINTER( phys_addr );
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK));
+
+ if (NULL == descriptor->mapping) return _MALI_OSK_ERR_INVALID_ARGS;
+
+ if (size > (descriptor->size - offset))
+ {
+ MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_map: virtual memory area not large enough to map physical 0x%x size %x into area 0x%x at offset 0x%xr\n",
+ *phys_addr, size, descriptor->mapping, offset));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ vma = mappingInfo->vma;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_PRINT(7, ("Process map: mapping 0x%08X to process address 0x%08lX length 0x%08X\n", *phys_addr, (long unsigned int)(descriptor->mapping + offset), size));
+
+ if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == *phys_addr )
+ {
+ _mali_osk_errcode_t ret;
+ AllocationList *alloc_item;
+ u32 linux_phys_frame_num;
+
+ alloc_item = _allocation_list_item_get();
+ if ( NULL == alloc_item )
+ {
+ /* Out of memory: the free list was empty and a new page could not be allocated */
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ linux_phys_frame_num = alloc_item->physaddr >> PAGE_SHIFT;
+
+ ret = ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, linux_phys_frame_num, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+ if ( ret != _MALI_OSK_ERR_OK)
+ {
+ _allocation_list_item_release(alloc_item);
+ return ret;
+ }
+
+ /* Put our alloc_item into the list of allocations on success */
+ alloc_item->next = mappingInfo->list;
+ alloc_item->offset = offset;
+
+ /*alloc_item->physaddr = linux_phys_addr;*/
+ mappingInfo->list = alloc_item;
+
+ /* Write out new physical address on success */
+ *phys_addr = alloc_item->physaddr;
+
+ return ret;
+ }
+
+ /* Otherwise, Use the supplied physical address */
+
+ /* ASSERT that supplied phys_addr is page aligned */
+ MALI_DEBUG_ASSERT( 0 == ((*phys_addr) & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ return ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, *phys_addr >> PAGE_SHIFT, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+}
+
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags )
+{
+ MappingInfo *mappingInfo;
+
+ if (NULL == descriptor) return;
+
+ MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+ MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+ if (NULL == descriptor->mapping) return;
+
+ if (size > (descriptor->size - offset))
+ {
+ MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_unmap: virtual memory area not large enough to unmap size %x from area 0x%x at offset 0x%x\n",
+ size, descriptor->mapping, offset));
+ return;
+ }
+ mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+ MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+ if ( 0 != (flags & _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR) )
+ {
+ /* This physical RAM was allocated in _mali_osk_mem_mapregion_map and
+ * so needs to be unmapped
+ */
+ while (size)
+ {
+ /* First find the allocation in the list of allocations */
+ AllocationList *alloc = mappingInfo->list;
+ AllocationList **prev = &(mappingInfo->list);
+ while (NULL != alloc && alloc->offset != offset)
+ {
+ prev = &(alloc->next);
+ alloc = alloc->next;
+ }
+ if (alloc == NULL) {
+ MALI_DEBUG_PRINT(1, ("Unmapping memory that isn't mapped\n"));
+ size -= _MALI_OSK_CPU_PAGE_SIZE;
+ offset += _MALI_OSK_CPU_PAGE_SIZE;
+ continue;
+ }
+
+ _kernel_page_release(alloc->physaddr);
+
+ /* Remove the allocation from the list */
+ *prev = alloc->next;
+ _mali_osk_free( alloc );
+
+ /* Move onto the next allocation */
+ size -= _MALI_OSK_CPU_PAGE_SIZE;
+ offset += _MALI_OSK_CPU_PAGE_SIZE;
+ }
+ }
+
+ /* Linux does the right thing as part of munmap to remove the mapping */
+
+ return;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c
new file mode 100644
index 00000000000..694d8eeaabc
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_mali.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Added support for overriding memory settings in arch_configuration using
+ * mali_mem module parameter.
+ *
+ * Author: Magnus Wendt <magnus.wendt@stericsson.com> for
+ * ST-Ericsson.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h" /* kernel side OS functions */
+#include "mali_uk_types.h"
+#include "mali_kernel_linux.h" /* exports initialize/terminate_kernel_device() definition of mali_osk_low_level_mem_init() and term */
+#include "arch/config.h" /* contains the configuration of the arch we are compiling for */
+
+extern char* mali_mem;
+
+/* is called from mali_kernel_constructor in common code */
+_mali_osk_errcode_t _mali_osk_init( void )
+{
+ if (0 != initialize_kernel_device()) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+ mali_osk_low_level_mem_init();
+
+ MALI_SUCCESS;
+}
+
+/* is called from mali_kernel_deconstructor in common code */
+void _mali_osk_term( void )
+{
+ mali_osk_low_level_mem_term();
+ terminate_kernel_device();
+}
+
+_mali_osk_errcode_t _mali_osk_resources_init( _mali_osk_resource_t **arch_config, u32 *num_resources )
+{
+ *num_resources = sizeof(arch_configuration) / sizeof(arch_configuration[0]);
+ *arch_config = arch_configuration;
+
+ /* override the MEMORY resource if a value has been supplied from the command line */
+ if ('\0' != mali_mem[0]) {
+ char *p = mali_mem;
+ _mali_osk_resource_type_t mem_type = MEMORY;
+ unsigned long mem_base = 0;
+ unsigned long mem_size = memparse(p, &p);
+ if (*p == '@') {
+ if ((*(p + 1) == 'O') || (*(p + 1) == 'o')) { /* as in OS. e.g. mali_mem=64M@OS_MEMORY */
+ mem_type = OS_MEMORY;
+ mem_base = 0;
+ } else { /* parse the base address */
+ mem_type = MEMORY;
+ mem_base = memparse(p + 1, &p);
+ }
+ }
+
+ /* change the first memory entry in the architecture config. */
+ if (0 < mem_size) {
+ int i;
+ for (i = 0; i < *num_resources; ++i) {
+ if ((MEMORY == arch_configuration[i].type) ||
+ (OS_MEMORY == arch_configuration[i].type)) {
+ MALI_DEBUG_PRINT( 1, ("Overriding arch resource[%d] :\n",i));
+ MALI_DEBUG_PRINT( 1, ("Type: %s, base: %x, size %x\n",
+ (OS_MEMORY==mem_type?"OS_MEMORY":"MEMORY"),mem_base,mem_size));
+ arch_configuration[i].type = mem_type;
+ arch_configuration[i].base = mem_base;
+ arch_configuration[i].size = mem_size;
+ break;
+ }
+ }
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_resources_term( _mali_osk_resource_t **arch_config, u32 num_resources )
+{
+ /* Nothing to do */
+}
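Given the memparse()-based parsing above, the mali_mem kernel command line parameter accepts a size with an optional base address or OS-memory marker. Illustrative values (the sizes and the address are made up):

    mali_mem=64M              dedicated MEMORY resource, base left at 0
    mali_mem=64M@0x80000000   dedicated MEMORY resource at physical address 0x80000000
    mali_mem=64M@OS_MEMORY    64 MiB taken from OS memory (the 'O'/'o' branch above)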
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c
new file mode 100644
index 00000000000..2f7fc15847b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_math.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+u32 inline _mali_osk_clz( u32 input )
+{
+ return 32-fls(input);
+}
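Since fls() returns the 1-based index of the most significant set bit, and 0 for an input of 0, the wrapper above behaves as a count-leading-zeros with these edge cases:

    _mali_osk_clz(0x80000000); /* == 0  (fls returns 32) */
    _mali_osk_clz(1);          /* == 31 (fls returns 1)  */
    _mali_osk_clz(0);          /* == 32 (fls returns 0)  */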
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c
new file mode 100644
index 00000000000..bf6679f7bac
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_memory.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+
+void inline *_mali_osk_calloc( u32 n, u32 size )
+{
+ return kcalloc(n, size, GFP_KERNEL);
+}
+
+void inline *_mali_osk_malloc( u32 size )
+{
+ return kmalloc(size, GFP_KERNEL);
+}
+
+void inline _mali_osk_free( void *ptr )
+{
+ kfree(ptr);
+}
+
+void inline *_mali_osk_memcpy( void *dst, const void *src, u32 len )
+{
+ return memcpy(dst, src, len);
+}
+
+void inline *_mali_osk_memset( void *s, u32 c, u32 n )
+{
+ return memset(s, c, n);
+}
+
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated )
+{
+ /* No need to prevent an out-of-memory dialogue appearing on Linux,
+ * so we always return MALI_TRUE.
+ */
+ return MALI_TRUE;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c
new file mode 100644
index 00000000000..3afd2da736c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_misc.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+void _mali_osk_dbgmsg( const char *fmt, ... )
+{
+ va_list args;
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+}
+
+void _mali_osk_abort(void)
+{
+ /* make a simple fault by dereferencing a NULL pointer */
+ *(int *)0 = 0;
+}
+
+void _mali_osk_break(void)
+{
+ _mali_osk_abort();
+}
+
+u32 _mali_osk_get_pid(void)
+{
+ /* Thread group ID is the process ID on Linux */
+ return (u32)current->tgid;
+}
+
+u32 _mali_osk_get_tid(void)
+{
+ /* pid is actually identifying the thread on Linux */
+ return (u32)current->pid;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c
new file mode 100644
index 00000000000..2efc140f60a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_notification.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+/**
+ * Declaration of the notification queue object type.
+ * Contains a linked list of notifications pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl.
+ * When a new notification is posted, a single thread is resumed.
+ */
+struct _mali_osk_notification_queue_t_struct
+{
+ struct semaphore mutex; /**< Mutex protecting the list */
+ wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
+ struct list_head head; /**< List of notifications waiting to be picked up */
+};
+
+typedef struct _mali_osk_notification_wrapper_t_struct
+{
+ struct list_head list; /**< Internal linked list variable */
+ _mali_osk_notification_t data; /**< Notification data */
+} _mali_osk_notification_wrapper_t;
+
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void )
+{
+ _mali_osk_notification_queue_t * result;
+
+ result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+ if (NULL == result) return NULL;
+
+ sema_init(&result->mutex, 1);
+ init_waitqueue_head(&result->receive_queue);
+ INIT_LIST_HEAD(&result->head);
+
+ return result;
+}
+
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size )
+{
+ /* OPT Recycling of notification objects */
+ _mali_osk_notification_wrapper_t *notification;
+
+ notification = (_mali_osk_notification_wrapper_t *)kmalloc( sizeof(_mali_osk_notification_wrapper_t) + size, GFP_KERNEL );
+ if (NULL == notification)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+ return NULL;
+ }
+
+ /* Init the list */
+ INIT_LIST_HEAD(&notification->list);
+
+ if (0 != size)
+ {
+ notification->data.result_buffer = ((u8*)notification) + sizeof(_mali_osk_notification_wrapper_t);
+ }
+ else
+ {
+ notification->data.result_buffer = NULL;
+ }
+
+ /* set up the non-allocating fields */
+ notification->data.notification_type = type;
+ notification->data.result_buffer_size = size;
+
+ /* all ok */
+ return &(notification->data);
+}
+
+void _mali_osk_notification_delete( _mali_osk_notification_t *object )
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER( object );
+
+ notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+ /* Remove from the list */
+ list_del(&notification->list);
+ /* Free the container */
+ kfree(notification);
+}
+
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue )
+{
+ MALI_DEBUG_ASSERT_POINTER( queue );
+
+ /* not much to do, just free the memory */
+ kfree(queue);
+}
+
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object )
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_ASSERT_POINTER( object );
+
+ notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+ /* lock queue access */
+ down(&queue->mutex);
+ /* add to list */
+ list_add_tail(&notification->list, &queue->head);
+ /* unlock the queue */
+ up(&queue->mutex);
+
+ /* and wake up one possible exclusive waiter */
+ wake_up(&queue->receive_queue);
+}
+
+static int _mali_notification_queue_is_empty( _mali_osk_notification_queue_t *queue )
+{
+ int ret;
+
+ down(&queue->mutex);
+ ret = list_empty(&queue->head);
+ up(&queue->mutex);
+ return ret;
+}
+
+#if MALI_STATE_TRACKING
+mali_bool _mali_osk_notification_queue_is_empty( _mali_osk_notification_queue_t *queue )
+{
+ return _mali_notification_queue_is_empty(queue) ? MALI_TRUE : MALI_FALSE;
+}
+#endif
+
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ _mali_osk_notification_wrapper_t *wrapper_object;
+
+ down(&queue->mutex);
+
+ if (!list_empty(&queue->head))
+ {
+ wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
+ *result = &(wrapper_object->data);
+ list_del_init(&wrapper_object->list);
+ ret = _MALI_OSK_ERR_OK;
+ }
+
+ up(&queue->mutex);
+
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER( queue );
+ MALI_DEBUG_ASSERT_POINTER( result );
+
+ /* default result */
+ *result = NULL;
+
+ while (_MALI_OSK_ERR_OK != _mali_osk_notification_queue_dequeue(queue, result))
+ {
+ if (wait_event_interruptible(queue->receive_queue, !_mali_notification_queue_is_empty(queue)))
+ {
+ return _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
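A minimal producer/consumer sketch for the queue above. The event type, payload struct and function names are hypothetical; the _mali_osk_notification_* calls are from this file.

    #define MY_EVENT_TYPE 1                /* hypothetical notification type */
    struct my_event { u32 code; };         /* hypothetical payload */

    static void producer( _mali_osk_notification_queue_t *q, u32 code )
    {
        _mali_osk_notification_t *n;
        struct my_event *ev;

        n = _mali_osk_notification_create( MY_EVENT_TYPE, sizeof(struct my_event) );
        if ( NULL == n ) return;
        ev = (struct my_event *)n->result_buffer; /* buffer allocated by create() */
        ev->code = code;
        _mali_osk_notification_queue_send( q, n );
    }

    static _mali_osk_errcode_t consumer( _mali_osk_notification_queue_t *q )
    {
        _mali_osk_notification_t *n = NULL;
        _mali_osk_errcode_t err;

        /* Blocks until something is queued; may return _MALI_OSK_ERR_RESTARTSYSCALL
         * if interrupted by a signal, exactly as in the ioctl path. */
        err = _mali_osk_notification_queue_receive( q, &n );
        if ( _MALI_OSK_ERR_OK != err ) return err;

        MALI_DEBUG_PRINT(4, ("got event %u\n", ((struct my_event *)n->result_buffer)->code));
        _mali_osk_notification_delete( n );
        return _MALI_OSK_ERR_OK;
    }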
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c
new file mode 100644
index 00000000000..04fcd4572df
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_pm.c
@@ -0,0 +1,195 @@
+/**
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+
+#include <linux/platform_device.h>
+
+#include "mali_platform.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_pmm.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_pm.h"
+#include "mali_device_pause_resume.h"
+#include "mali_linux_pm.h"
+#include "mali_linux_pm_testsuite.h"
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifdef CONFIG_PM_RUNTIME
+static int is_runtime =0;
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+#ifdef CONFIG_PM
+unsigned int mali_pmm_events_triggered_mask = 0;
+#endif /* CONFIG_PM */
+
+void _mali_osk_pmm_policy_events_notifications(mali_pmm_event_id mali_pmm_event)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+
+ switch (mali_pmm_event)
+ {
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<0);
+ }
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<1);
+ }
+ break;
+
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<2);
+ mali_job_scheduling_events_recording_on = 0;
+ pwr_mgmt_status_reg = mali_pmm_events_triggered_mask;
+ }
+ break;
+
+ case MALI_PMM_EVENT_TIMEOUT:
+ if (mali_timeout_event_recording_on == 1)
+ {
+ pwr_mgmt_status_reg = (1<<3);
+ mali_timeout_event_recording_on = 0;
+ }
+ break;
+
+ default:
+
+ break;
+
+ }
+#endif /* CONFIG_PM */
+
+#endif /* MALI_LICENSE_IS_GPL */
+}
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+/** This function is called when the Mali device has completed the power-up
+ * operation.
+ */
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+ wake_up_process(pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI OSK Power up Done\n" ));
+ return;
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+/** This function is called when the Mali device has completed the power-down
+ * operation.
+ */
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+#if MALI_POWER_MGMT_TEST_SUITE
+ if (is_mali_pmu_present == 0)
+ {
+ pwr_mgmt_status_reg = _mali_pmm_cores_list();
+ }
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ wake_up_process(pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI Power down Done\n" ));
+ return;
+
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+/** This function is invoked when the Mali device is idle.
+*/
+_mali_osk_errcode_t _mali_osk_pmm_dev_idle(void)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+
+ err = pm_runtime_put_sync(&(mali_gpu_device.dev));
+ if(err)
+ {
+ MALI_DEBUG_PRINT(4, ("OSPMM: Error in _mali_osk_pmm_dev_idle\n" ));
+ }
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_LICENSE_IS_GPL */
+ return err;
+}
+
+/** This function is invoked when the Mali device needs to be activated.
+*/
+void _mali_osk_pmm_dev_activate(void)
+{
+
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ int err = 0;
+ if(is_runtime == 0)
+ {
+ pm_suspend_ignore_children(&(mali_gpu_device.dev), true);
+ pm_runtime_enable(&(mali_gpu_device.dev));
+ pm_runtime_get_sync(&(mali_gpu_device.dev));
+ is_runtime = 1;
+ }
+ else
+ {
+ err = pm_runtime_get_sync(&(mali_gpu_device.dev));
+ }
+ if(err)
+ {
+ MALI_DEBUG_PRINT(4, ("OSPMM: Error in _mali_osk_pmm_dev_activate\n" ));
+ }
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+void _mali_osk_pmm_dvfs_operation_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+ wake_up_process(dvfs_pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI OSK DVFS Operation done\n" ));
+ return;
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+
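The activate/idle pair above is the usual runtime-PM reference-count pattern: every successful get must eventually be balanced by a put. A hypothetical job submission path built on it might look like:

    static void submit_job_sketch( void )
    {
        _mali_osk_pmm_dev_activate();  /* pm_runtime_get_sync(): powers the GPU up if needed */
        /* ... hand the job to the hardware and wait for it to complete ... */
        _mali_osk_pmm_dev_idle();      /* pm_runtime_put_sync(): may power the GPU down */
    }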
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h
new file mode 100644
index 00000000000..54acfdd138c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_specific.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c
new file mode 100644
index 00000000000..aa7962380e6
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_time.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+int _mali_osk_time_after( u32 ticka, u32 tickb )
+{
+ return time_after((unsigned long)ticka, (unsigned long)tickb);
+}
+
+u32 _mali_osk_time_mstoticks( u32 ms )
+{
+ return msecs_to_jiffies(ms);
+}
+
+u32 _mali_osk_time_tickstoms( u32 ticks )
+{
+ return jiffies_to_msecs(ticks);
+}
+
+u32 _mali_osk_time_tickcount( void )
+{
+ return jiffies;
+}
+
+void _mali_osk_time_ubusydelay( u32 usecs )
+{
+ udelay(usecs);
+}
+
+u64 _mali_osk_time_get_ns( void )
+{
+ struct timespec tsval;
+ getnstimeofday(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
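Because ticks are raw jiffies, deadline checks must go through _mali_osk_time_after() rather than a plain '>', so the wrap-around handling of the time_after() macro is preserved. A sketch (the 500 ms deadline is arbitrary):

    static int timed_out_sketch( void )
    {
        /* Hypothetical 500 ms deadline, expressed in ticks (jiffies). */
        u32 deadline = _mali_osk_time_tickcount() + _mali_osk_time_mstoticks(500);

        /* ... do some work ... */

        /* Safe across the jiffies wrap, unlike a plain '>' comparison. */
        return _mali_osk_time_after( _mali_osk_time_tickcount(), deadline );
    }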
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c
new file mode 100644
index 00000000000..3dfba76bcb3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_timers.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_timer_t_struct
+{
+ struct timer_list timer;
+};
+
+typedef void (*timer_timeout_function_t)(unsigned long);
+
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+ _mali_osk_timer_t *t = (_mali_osk_timer_t*)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+ if (NULL != t) init_timer(&t->timer);
+ return t;
+}
+
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.expires = _mali_osk_time_tickcount() + ticks_to_expire;
+ add_timer(&(tim->timer));
+}
+
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 expiry_tick)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ mod_timer(&(tim->timer), expiry_tick);
+}
+
+void _mali_osk_timer_del( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer_sync(&(tim->timer));
+}
+
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.data = (unsigned long)data;
+ tim->timer.function = (timer_timeout_function_t)callback;
+}
+
+void _mali_osk_timer_term( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ kfree(tim);
+}
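A one-shot watchdog sketch built on the wrappers above, assuming the OSK callback type is void (*)(void *) as the cast to timer_timeout_function_t suggests; watchdog_fire and its argument are hypothetical.

    static void watchdog_fire( void *data )
    {
        MALI_DEBUG_PRINT(2, ("watchdog expired for %p\n", data));
    }

    static _mali_osk_timer_t *start_watchdog_sketch( void *data )
    {
        _mali_osk_timer_t *t = _mali_osk_timer_init();
        if ( NULL == t ) return NULL;
        _mali_osk_timer_setcallback( t, watchdog_fire, data );
        _mali_osk_timer_add( t, _mali_osk_time_mstoticks(100) ); /* relative ticks */
        return t;
    }

    /* Tear-down: del before term, since term only frees the memory:
     *   _mali_osk_timer_del(t); _mali_osk_timer_term(t);
     */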
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c
new file mode 100644
index 00000000000..0f745492613
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_core.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <linux/slab.h> /* memort allocation functions */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+ _mali_uk_get_api_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_api_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
+int get_system_info_size_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_size_s __user *uargs)
+{
+ _mali_uk_get_system_info_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_system_info_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+int get_system_info_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_s __user *uargs)
+{
+ _mali_uk_get_system_info_s kargs;
+ _mali_osk_errcode_t err;
+ _mali_system_info *system_info_user;
+ _mali_system_info *system_info_kernel;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.system_info, &uargs->system_info)) return -EFAULT;
+ if (0 != get_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ /* A temporary kernel buffer for the system_info datastructure is passed through the system_info
+ * member. The ukk_private member will point to the user space destination of this buffer so
+ * that _mali_ukk_get_system_info() can adjust the pointers in the system_info for
+ * user space.
+ */
+ system_info_kernel = kmalloc(kargs.size, GFP_KERNEL);
+ if (NULL == system_info_kernel) return -EFAULT;
+
+ system_info_user = kargs.system_info;
+ kargs.system_info = system_info_kernel;
+ kargs.ukk_private = (u32)system_info_user;
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_get_system_info(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ kfree(system_info_kernel);
+ return map_errcode(err);
+ }
+
+ if (0 != copy_to_user(system_info_user, system_info_kernel, kargs.size))
+ {
+ kfree(system_info_kernel);
+ return -EFAULT;
+ }
+
+ kfree(system_info_kernel);
+ return 0;
+}
+
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+ _mali_uk_wait_for_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_wait_for_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if(_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type)
+ {
+ kargs.ctx = NULL; /* prevent the kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+ }
+ else
+ {
+ if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
+ }
+
+ return 0;
+}
+
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+ _mali_uk_post_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ if (0 != get_user(kargs.type, &uargs->type))
+ {
+ return -EFAULT;
+ }
+
+ err = _mali_ukk_post_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c
new file mode 100644
index 00000000000..a6f355fd459
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_gp.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs)
+{
+ _mali_uk_gp_start_job_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (!access_ok(VERIFY_WRITE, uargs, sizeof(_mali_uk_gp_start_job_s)))
+ {
+ return -EFAULT;
+ }
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_start_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_start_job(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ kargs.ctx = NULL; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_gp_start_job_s)))
+ {
+ /*
+ * If this happens, user space will not know that the job was actually started,
+ * and if we return a queued job, user space will think that it is still queued.
+ * This will typically lead to a deadlock in user space.
+ * This can, however, only happen if user space deliberately passes a buffer that
+ * passes the access_ok(VERIFY_WRITE) check but is not fully writable at the time of copy_to_user().
+ * The official Mali driver will never attempt to do that, and kernel space is not affected either way.
+ * That is why we do not bother with a complex rollback in this extremely rare case.
+ */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int gp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_abort_job_s __user *uargs)
+{
+ _mali_uk_gp_abort_job_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_abort_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ _mali_ukk_gp_abort_job(&kargs);
+
+ return 0;
+}
+
+
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+ _mali_uk_get_gp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+ _mali_uk_gp_suspend_response_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_suspend_response(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT;
+
+ /* no known transactions to roll-back */
+ return 0;
+}
+
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_gp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c
new file mode 100644
index 00000000000..3f67a1e3328
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_mem.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs)
+{
+ _mali_uk_init_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_init_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.mali_address_base, &uargs->mali_address_base)) goto mem_init_rollback;
+ if (0 != put_user(kargs.memory_size, &uargs->memory_size)) goto mem_init_rollback;
+
+ return 0;
+
+mem_init_rollback:
+ {
+ _mali_uk_term_mem_s kargs;
+ kargs.ctx = session_data;
+ err = _mali_ukk_term_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_init_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+}
+
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs)
+{
+ _mali_uk_term_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_term_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument)
+{
+ _mali_uk_map_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_map_external_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_map_external_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback */
+ _mali_uk_unmap_external_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_unmap_external_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_unmap_external_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Return the error that _mali_ukk_map_external_mem produced */
+ return map_errcode(err_code);
+}
+
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument)
+{
+ _mali_uk_unmap_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_unmap_external_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_unmap_external_mem( &uk_args );
+
+ /* Return the error that _mali_ukk_unmap_external_mem produced */
+ return map_errcode(err_code);
+}
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument)
+{
+ _mali_uk_release_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_release_ump_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_release_ump_mem( &uk_args );
+
+ /* Return the error that _mali_ukk_release_ump_mem produced */
+ return map_errcode(err_code);
+}
+
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument)
+{
+ _mali_uk_attach_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_attach_ump_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_attach_ump_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback */
+ _mali_uk_release_ump_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_release_ump_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_attach_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Return the error that _mali_ukk_attach_ump_mem produced */
+ return map_errcode(err_code);
+}
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs)
+{
+ _mali_uk_query_mmu_page_table_dump_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs)
+{
+ _mali_uk_dump_mmu_page_table_s kargs;
+ _mali_osk_errcode_t err;
+ void *buffer;
+ int rc = -EFAULT;
+
+ /* validate input */
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ /* the session_data pointer was validated by caller */
+
+ kargs.buffer = NULL;
+
+ /* get location of user buffer */
+ if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
+ /* get size of mmu page table info buffer from user space */
+ if ( 0 != get_user(kargs.size, &uargs->size) ) goto err_exit;
+ /* verify we can access the whole of the user buffer */
+ if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+
+ /* allocate temporary buffer (kernel side) to store mmu page table info */
+ kargs.buffer = _mali_osk_malloc(kargs.size);
+ if (NULL == kargs.buffer)
+ {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_dump_mmu_page_table(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ rc = map_errcode(err);
+ goto err_exit;
+ }
+
+ /* copy mmu page table info back to user space and update pointers */
+ if (0 != copy_to_user(uargs->buffer, kargs.buffer, kargs.size) ) goto err_exit;
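+ /* kargs.register_writes and kargs.page_table_dump point into the kernel-side
+ * buffer; re-base them against the user buffer below so that user space
+ * receives pointers into its own copy of the dump. */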
+ if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->register_writes)) goto err_exit;
+ if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->page_table_dump)) goto err_exit;
+ if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
+ if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+ rc = 0;
+
+err_exit:
+ if (kargs.buffer) _mali_osk_free(kargs.buffer);
+ return rc;
+}
+
+
+
+int mem_get_big_block_wrapper( struct file * filp, _mali_uk_get_big_block_s __user * argument )
+{
+ _mali_uk_get_big_block_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_get_big_block_s)) )
+ {
+ return -EFAULT;
+ }
+
+ /* This interface inserts something into the ukk_private word */
+ uk_args.ukk_private = (u32)filp;
+ uk_args.ctx = filp->private_data;
+ err_code = _mali_ukk_get_big_block( &uk_args );
+
+ /* Do not leak the private word back into user space */
+ uk_args.ukk_private = 0;
+
+ if ( _MALI_OSK_ERR_OK != err_code )
+ {
+ return map_errcode(err_code);
+ }
+
+ /* From this point on, we must roll-back any failing action to preserve the
+ * meaning of the U/K interface (e.g. when excluded) */
+
+ /* transfer response back to user space */
+ if ( 0 != copy_to_user(argument, &uk_args, sizeof(_mali_uk_get_big_block_s)) )
+ {
+ /* Roll-back - the _mali_uk_get_big_block call succeeded, so all
+ * values in uk_args will be correct */
+ _mali_uk_free_big_block_s uk_args_rollback = {0, };
+
+ uk_args_rollback.ctx = uk_args.ctx;
+ uk_args_rollback.cookie = uk_args.cookie;
+ err_code = _mali_ukk_free_big_block( &uk_args_rollback );
+
+ if ( _MALI_OSK_ERR_OK != err_code )
+ {
+ /* error in DEBUG and RELEASE */
+ MALI_PRINT_ERROR( ("Failed to rollback get_big_block: %.8X\n", (u32)err_code) );
+ }
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int mem_free_big_block_wrapper(struct mali_session_data *session_data, _mali_uk_free_big_block_s __user * argument)
+{
+ _mali_uk_free_big_block_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL );
+
+ /* get call arguments from user space. get_user returns 0 on success */
+ if ( 0 != get_user(uk_args.cookie, &argument->cookie) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_free_big_block( &uk_args );
+
+ /* Return the error that _mali_ukk_free_big_block produced */
+ return map_errcode(err_code);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c
new file mode 100644
index 00000000000..f77a177d865
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_pp.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs)
+{
+ _mali_uk_pp_start_job_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (!access_ok(VERIFY_WRITE, uargs, sizeof(_mali_uk_pp_start_job_s)))
+ {
+ return -EFAULT;
+ }
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_start_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_pp_start_job(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.returned_user_job_ptr, &uargs->returned_user_job_ptr) ||
+ 0 != put_user(kargs.status, &uargs->status))
+ {
+ /*
+ * If this happens, user space will not know that the job was actually started,
+ * and if we return a queued job, user space will think that it is still queued.
+ * This will typically lead to a deadlock in user space.
+ * This can, however, only happen if user space deliberately passes a buffer that
+ * passes the access_ok(VERIFY_WRITE) check but is not fully writable at the time of copy_to_user().
+ * The official Mali driver will never attempt to do that, and kernel space is not affected either way.
+ * That is why we do not bother with a complex rollback in this extremely rare case.
+ */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int pp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_abort_job_s __user *uargs)
+{
+ _mali_uk_pp_abort_job_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_abort_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ _mali_ukk_pp_abort_job(&kargs);
+
+ return 0;
+}
+
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_pp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_pp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
+
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+ _mali_uk_get_pp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_pp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c
new file mode 100644
index 00000000000..636bd03580e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_profiling.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs)
+{
+ _mali_uk_profiling_start_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_start_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_start(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.limit, &uargs->limit))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+ _mali_uk_profiling_add_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_add_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
+{
+ _mali_uk_profiling_stop_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_stop(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.count, &uargs->count))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
+{
+ _mali_uk_profiling_get_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.index, &uargs->index))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_profiling_get_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = NULL; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_get_event_s)))
+ {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
+{
+ _mali_uk_profiling_clear_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_profiling_clear(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c
new file mode 100644
index 00000000000..965ee411535
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_vsync.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+ _mali_uk_vsync_event_report_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s)))
+ {
+ return -EFAULT;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_vsync_event_report(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h
new file mode 100644
index 00000000000..54e3f656b37
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_ukk_wrappers.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+int get_system_info_size_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_size_s __user *uargs);
+int get_system_info_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_s __user *uargs);
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs);
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs);
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument);
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs);
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument);
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument);
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+int mem_get_big_block_wrapper( struct file * filp, _mali_uk_get_big_block_s __user * argument );
+int mem_free_big_block_wrapper( struct mali_session_data *session_data, _mali_uk_free_big_block_s __user * argument);
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_abort_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_abort_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
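+/* map_errcode() translates a _mali_osk_errcode_t into the corresponding
+ * negative errno value for the ioctl layer (e.g. _MALI_OSK_ERR_NOMEM to
+ * -ENOMEM). Behaviour inferred from its use in the wrappers above; the
+ * mapping itself is defined elsewhere in the driver. */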
+int map_errcode( _mali_osk_errcode_t err );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c
new file mode 100644
index 00000000000..36301a93a2c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/default/mali_platform.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for a default platform
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+ MALI_SUCCESS;
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+u32 pmu_get_power_up_down_info(void)
+{
+ return 4095;
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c
new file mode 100644
index 00000000000..b8c47094177
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali-runtimepm/mali_platform.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for a default platform
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+#include "mali_pmm.h"
+
+static int is_run_time = 0;
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+ if (is_run_time == 1)
+ {
+ _mali_osk_pmm_dev_idle();
+ is_run_time = 0;
+ }
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+ if (is_run_time == 0)
+ {
+ _mali_osk_pmm_dev_activate();
+ is_run_time = 1;
+ }
+ MALI_SUCCESS;
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+u32 pmu_get_power_up_down_info(void)
+{
+ return 4095;
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c
new file mode 100644
index 00000000000..cf95b8ae09e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali400-pmu/mali_platform.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+#if USING_MALI_PMM
+
+#include "mali_pmm.h"
+
+/* Internal test on/off */
+#define PMU_TEST 0
+
+/** @brief PMU hardware info
+ */
+typedef struct platform_pmu
+{
+ u32 reg_base_addr; /**< PMU registers base address */
+ u32 reg_size; /**< PMU registers size */
+ const char *name; /**< PMU name */
+ u32 irq_num; /**< PMU irq number */
+
+ mali_io_address reg_mapped; /**< IO-mapped pointer to registers */
+} platform_pmu_t;
+
+static platform_pmu_t *pmu_info = NULL;
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+ PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /**< Power up register */
+ PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /**< Power down register */
+ PMU_REG_ADDR_MGMT_STATUS = 0x08, /**< Core sleep status register */
+ PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /**< Interrupt mask register */
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /**< Interrupt raw status register */
+ PMU_REG_ADDR_MGMT_INT_STAT = 0x14, /**< Interrupt status register */
+ PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /**< Interrupt clear register */
+ PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /**< Software delay register */
+ PMU_REG_ADDR_MGMT_MASTER_PWR_UP = 0x24, /**< Master power up register */
+ PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /**< Size of register space */
+} pmu_reg_addr_mgmt_addr;
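+
+/* Usage sketch: mali_platform_powerdown() below writes the core mask to
+ * PMU_REG_ADDR_MGMT_POWER_DOWN and then polls PMU_REG_ADDR_MGMT_STATUS until
+ * the requested bits read back as asleep (or a 10ms timeout expires);
+ * power-up mirrors this via PMU_REG_ADDR_MGMT_POWER_UP with interrupts masked.
+ */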
+
+/* Internal functions */
+u32 pmu_reg_read(platform_pmu_t *pmu, u32 relative_address);
+void pmu_reg_write(platform_pmu_t *pmu, u32 relative_address, u32 new_val);
+mali_pmm_core_mask pmu_translate_cores_to_pmu(mali_pmm_core_mask cores);
+#if PMU_TEST
+void pmm_pmu_dump_regs( platform_pmu_t *pmu );
+void pmm_pmu_test( platform_pmu_t *pmu, u32 cores );
+#endif
+
+#endif /* USING_MALI_PMM */
+
+
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+#if USING_MALI_PMM
+ if( resource == NULL )
+ {
+ /* Nothing to set up for the system */
+ }
+ else if( resource->type == PMU )
+ {
+ if( (resource->base == 0) ||
+ (resource->description == NULL) )
+ {
+ /* NOTE: We currently don't care about any other resource settings */
+ MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Missing PMU set up information\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_DEBUG_ASSERT( pmu_info == NULL );
+ pmu_info = (platform_pmu_t *)_mali_osk_malloc(sizeof(*pmu_info));
+ MALI_CHECK_NON_NULL( pmu_info, _MALI_OSK_ERR_NOMEM );
+
+ /* All values get 0 as default */
+ _mali_osk_memset(pmu_info, 0, sizeof(*pmu_info));
+
+ pmu_info->reg_base_addr = resource->base;
+ pmu_info->reg_size = (u32)PMU_REGISTER_ADDRESS_SPACE_SIZE;
+ pmu_info->name = resource->description;
+ pmu_info->irq_num = resource->irq;
+
+ if( _MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->name) )
+ {
+ MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Could not request register region (0x%08X - 0x%08X) for %s\n",
+ pmu_info->reg_base_addr, pmu_info->reg_base_addr + pmu_info->reg_size - 1, pmu_info->name));
+ goto cleanup;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: request_mem_region: (0x%08X - 0x%08X) for %s\n",
+ pmu_info->reg_base_addr, pmu_info->reg_base_addr + pmu_info->reg_size - 1, pmu_info->name));
+ }
+
+ pmu_info->reg_mapped = _mali_osk_mem_mapioregion( pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->name );
+
+ if( 0 == pmu_info->reg_mapped )
+ {
+ MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Could not ioremap registers for %s .\n", pmu_info->name));
+ _mali_osk_mem_unreqregion( pmu_info->reg_base_addr, pmu_info->reg_size );
+ goto cleanup;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: ioremap_nocache: Internal ptr: (0x%08X - 0x%08X) for %s\n",
+ (u32) pmu_info->reg_mapped,
+ ((u32)pmu_info->reg_mapped)+ pmu_info->reg_size - 1,
+ pmu_info->name));
+ }
+
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: Mapping registers to %s\n", pmu_info->name));
+
+#if PMU_TEST
+ pmu_test(pmu_info, (MALI_PMM_CORE_GP));
+ pmu_test(pmu_info, (MALI_PMM_CORE_GP|MALI_PMM_CORE_L2|MALI_PMM_CORE_PP0));
+#endif
+
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Initialized - %s\n", pmu_info->name) );
+ }
+ else
+ {
+ /* Didn't expect a different resource */
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_SUCCESS;
+
+cleanup:
+ _mali_osk_free(pmu_info);
+ pmu_info = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+#else
+ /* Nothing to do when not using PMM, as Mali is already powered on */
+ MALI_SUCCESS;
+#endif
+
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+#if USING_MALI_PMM
+ if( type == NULL )
+ {
+ /* Nothing to tear down for the system */
+ }
+ else if (*type == PMU)
+ {
+ if( pmu_info )
+ {
+ _mali_osk_mem_unmapioregion(pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->reg_mapped);
+ _mali_osk_mem_unreqregion(pmu_info->reg_base_addr, pmu_info->reg_size);
+ _mali_osk_free(pmu_info);
+ pmu_info = NULL;
+
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Terminated PMU\n") );
+ }
+ }
+ else
+ {
+ /* Didn't expect a different resource */
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_SUCCESS;
+
+#else
+ /* Nothing to do when not using PMM */
+ MALI_SUCCESS;
+#endif
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+#if USING_MALI_PMM
+ u32 stat;
+ u32 timeout;
+ u32 cores_pmu;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu_info);
+ MALI_DEBUG_ASSERT( cores != 0 ); /* Shouldn't receive zero from PMM */
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: power down (0x%x)\n", cores) );
+
+ cores_pmu = pmu_translate_cores_to_pmu(cores);
+ pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_POWER_DOWN, cores_pmu );
+
+ /* Wait for cores to be powered down */
+ timeout = 10; /* 10ms */
+ do
+ {
+ /* Get status of sleeping cores */
+ stat = pmu_reg_read( pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ stat &= cores_pmu;
+ if( stat == cores_pmu ) break; /* All cores we wanted are now asleep */
+ _mali_osk_time_ubusydelay(1000); /* 1ms */
+ timeout--;
+ } while( timeout > 0 );
+
+ if( timeout == 0 ) MALI_ERROR(_MALI_OSK_ERR_TIMEOUT);
+
+ MALI_SUCCESS;
+
+#else
+ /* Nothing to do when not using PMM */
+ MALI_SUCCESS;
+#endif
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+#if USING_MALI_PMM
+ u32 cores_pmu;
+ u32 stat;
+ u32 timeout;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu_info);
+ MALI_DEBUG_ASSERT( cores != 0 ); /* Shouldn't receive zero from PMM */
+ MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: power up (0x%x)\n", cores) );
+
+ /* Don't use interrupts - just poll status */
+ pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_INT_MASK, 0 );
+ cores_pmu = pmu_translate_cores_to_pmu(cores);
+ pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_POWER_UP, cores_pmu );
+
+ timeout = 10; /* 10ms */
+ do
+ {
+ /* Get status of sleeping cores */
+ stat = pmu_reg_read( pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ stat &= cores_pmu;
+ if( stat == 0 ) break; /* All cores we wanted are now awake */
+ _mali_osk_time_ubusydelay(1000); /* 1ms */
+ timeout--;
+ } while( timeout > 0 );
+
+ if( timeout == 0 ) MALI_ERROR(_MALI_OSK_ERR_TIMEOUT);
+
+ MALI_SUCCESS;
+
+#else
+ /* Nothing to do when not using PMM */
+ MALI_SUCCESS;
+#endif
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+#if USING_MALI_PMM
+
+/***** INTERNAL *****/
+
+/** @brief Internal PMU function to translate the cores bit mask
+ * into something the hardware PMU understands
+ *
+ * @param cores PMM cores bitmask
+ * @return PMU hardware cores bitmask
+ */
+u32 pmu_translate_cores_to_pmu(mali_pmm_core_mask cores)
+{
+ /* For Mali 400 PMU the cores mask is already the same as what
+ * the hardware PMU expects.
+ * For other hardware, some translation can be done here, by
+ * translating the MALI_PMM_CORE_* bits into specific hardware
+ * bits
+ */
+ return cores;
+}
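+
+/* A hypothetical translation for a PMU with a different bit layout (not needed
+ * for Mali 400, which is why the function above is a pass-through) could look
+ * like this, where HW_PMU_GP_BIT and HW_PMU_PP0_BIT are made-up names used for
+ * illustration only:
+ *
+ *   u32 hw = 0;
+ *   if (cores & MALI_PMM_CORE_GP) hw |= HW_PMU_GP_BIT;
+ *   if (cores & MALI_PMM_CORE_PP0) hw |= HW_PMU_PP0_BIT;
+ *   return hw;
+ */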
+
+/** @brief Internal PMU function to read a PMU register
+ *
+ * @param pmu handle that identifies the PMU hardware
+ * @param relative_address relative PMU hardware address to read from
+ * @return 32-bit value that was read from the address
+ */
+u32 pmu_reg_read(platform_pmu_t *pmu, u32 relative_address)
+{
+ u32 read_val;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+ MALI_DEBUG_ASSERT(relative_address < pmu->reg_size);
+
+ read_val = _mali_osk_mem_ioread32(pmu->reg_mapped, relative_address);
+
+ MALI_DEBUG_PRINT( 5, ("PMU: reg_read: %s Addr:0x%04X Val:0x%08x\n",
+ pmu->name, relative_address, read_val));
+
+ return read_val;
+}
+
+/** @brief Internal PMU function to write to a PMU register
+ *
+ * @param pmu handle that identifies the PMU hardware
+ * @param relative_address relative PMU hardware address to write to
+ * @param new_val new 32-bit value to write into the address
+ */
+void pmu_reg_write(platform_pmu_t *pmu, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+ MALI_DEBUG_ASSERT(relative_address < pmu->reg_size);
+
+ MALI_DEBUG_PRINT( 5, ("PMU: reg_write: %s Addr:0x%04X Val:0x%08x\n",
+ pmu->name, relative_address, new_val));
+
+ _mali_osk_mem_iowrite32(pmu->reg_mapped, relative_address, new_val);
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+u32 pmu_get_power_up_down_info(void)
+{
+ return pmu_reg_read(pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS);
+}
+
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+#endif /* USING_MALI_PMM */
+
+
+#if USING_MALI_PMM && PMU_TEST
+
+/***** TEST *****/
+
+void pmu_dump_regs( platform_pmu_t *pmu )
+{
+ u32 addr;
+ for( addr = 0x0; addr < PMU_REGISTER_ADDRESS_SPACE_SIZE; addr += 0x4 )
+ {
+ MALI_PRINT( ("PMU_REG: 0x%08x: 0x%04x\n", (addr + pmu->reg_base_addr), pmu_reg_read( pmu, addr ) ) );
+ }
+}
+
+/* This function is an internal test for the PMU without any Mali h/w interaction */
+void pmu_test( platform_pmu_t *pmu, u32 cores )
+{
+ u32 stat;
+ u32 timeout;
+
+ MALI_PRINT( ("PMU_TEST: Start\n") );
+
+ pmu_dump_regs( pmu );
+
+ MALI_PRINT( ("PMU_TEST: Power down cores: 0x%x\n", cores) );
+ _mali_pmm_pmu_power_down( pmu, cores, MALI_TRUE );
+
+ stat = pmu_reg_read( pmu, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ MALI_PRINT( ("PMU_TEST: %s\n", (stat & cores) == cores ? "SUCCESS" : "FAIL" ) );
+
+ pmu_dump_regs( pmu );
+
+ MALI_PRINT( ("PMU_TEST: Power up cores: 0x%x\n", cores) );
+ _mali_pmm_pmu_power_up( pmu, cores, MALI_FALSE );
+
+ MALI_PRINT( ("PMU_TEST: Waiting for power up...\n") );
+ timeout = 1000; /* 1 sec */
+ while( !_mali_pmm_pmu_irq_power_up(pmu) && timeout > 0 )
+ {
+ _mali_osk_time_ubusydelay(1000); /* 1ms */
+ timeout--;
+ }
+
+ MALI_PRINT( ("PMU_TEST: Waited %dms for interrupt\n", (1000-timeout)) );
+ stat = pmu_reg_read( pmu, (u32)PMU_REG_ADDR_MGMT_STATUS );
+ MALI_PRINT( ("PMU_TEST: %s\n", (stat & cores) == 0 ? "SUCCESS" : "FAIL" ) );
+
+ _mali_pmm_pmu_irq_power_up_clear(pmu);
+
+ pmu_dump_regs( pmu );
+
+ MALI_PRINT( ("PMU_TEST: Finish\n") );
+}
+#endif /* USING_MALI_PMM && PMU_TEST */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h
new file mode 100644
index 00000000000..575c1fb6113
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/mali_platform.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.h
+ * Platform specific Mali driver functions
+ */
+
+#include "mali_osk.h"
+
+#if USING_MALI_PMM
+#include "mali_pmm.h"
+#endif
+
+#if !USING_MALI_PMM
+/** @brief Core mask that can be passed into mali_platform_powerdown/up() when PMM is not used */
+#define MALI_PLATFORM_SYSTEM 0
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entry point of the driver to initialise the platform.
+ * When using PMM, it is also called from the PMM start-up code to initialise the
+ * system PMU.
+ *
+ * @param resource This is NULL when called on first driver start up, else it will
+ * be a pointer to a PMU resource
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform.
+ * When using PMM, it is also called from the PMM termination code to clean up the
+ * system PMU.
+ *
+ * @param type This is NULL when called on driver exit, else it will
+ * be a pointer to a PMU resource type (not the full resource)
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Called as part of platform init if there is no PMM support, else the
+ * PMM will call it.
+ *
+ * @param cores This is MALI_PLATFORM_SYSTEM when called without PMM, else it will
+ * be a mask of cores to power down based on the mali_pmm_core_id enum
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores);
+
+/** @brief Platform specific powerup sequence of MALI
+ *
+ * Called as part of platform deinit if there is no PMM support, else the
+ * PMM will call it.
+ *
+ * @param cores This is MALI_PLATFORM_SYSTEM when called without PMM, else it will
+ * be a mask of cores to power up based on the mali_pmm_core_id enum
+ * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_powerup(u32 cores);
+
+/** @brief Platform specific handling of GPU utilization data
+ *
+ * When GPU utilization data is enabled, this function will be
+ * periodically called.
+ *
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+void mali_gpu_utilization_handler(u32 utilization);
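+
+/* A minimal port may implement the functions above as no-ops (returning
+ * _MALI_OSK_ERR_OK where an error code is expected), as
+ * platform/default/mali_platform.c in this tree does; see
+ * platform/ux500/mali_platform.c for a port that actually gates the clock
+ * and regulator.
+ */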
+
+#if USING_MALI_PMM
+#if MALI_POWER_MGMT_TEST_SUITE
+/** @brief Function to get the power status of the individual cores
+ *
+ * This function is used by the power management test suite to query which
+ * cores are currently powered up or down.
+ * @return Bitmask describing the power up/down status of the cores
+ */
+u32 pmu_get_power_up_down_info(void);
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c
new file mode 100644
index 00000000000..481e6e76c54
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/mali_platform.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Mali related Ux500 platform initialization
+ *
+ * Author: Marta Lofstedt <marta.lofstedt@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for ST-Ericsson's Ux500 platforms
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+
+#define MALI_HIGH_TO_LOW_LEVEL_UTILIZATION_LIMIT 64
+#define MALI_LOW_TO_HIGH_LEVEL_UTILIZATION_LIMIT 192
+
+static int is_running;
+static struct regulator *regulator;
+static struct clk *clk_sga;
+static u32 last_utilization;
+static struct work_struct mali_utilization_work;
+static struct workqueue_struct *mali_utilization_workqueue;
+
+/* Rationale behind the values for:
+* MALI_HIGH_TO_LOW_LEVEL_UTILIZATION_LIMIT and MALI_LOW_TO_HIGH_LEVEL_UTILIZATION_LIMIT
+* When operating at half clock frequency a faster clock is requested when
+* reaching 75% utilization. When operating at full clock frequency a slower
+* clock is requested when reaching 25% utilization. There is a margin of 25%
+* at the high range of the slow clock to avoid complete saturation of the
+* hardware and there is some overlap to avoid an oscillating situation where
+* the clock goes back and forth from high to low.
+*
+* Utilization on full speed clock
+* 0 64 128 192 255
+* |---------------|---------------|---------------|---------------|
+* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+* | ^
+* V |
+* XXXXXXXXXXXXXXXXXXXXXXXXX
+* 0 64 128 192 255
+* |-------|-------|-------|-------|
+* Utilization on half speed clock
+*/
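+/* Worked example of the hysteresis: at half speed, a utilization of 200
+* (above MALI_LOW_TO_HIGH_LEVEL_UTILIZATION_LIMIT = 192) requests 100% APE and
+* DDR OPP; the request is only dropped once utilization falls below
+* MALI_HIGH_TO_LOW_LEVEL_UTILIZATION_LIMIT (= 64), so a load hovering around
+* 128 never makes the clock flip back and forth.
+*/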
+void mali_utilization_function(struct work_struct *ptr)
+{
+ /* By default, the platform starts with 50% APE OPP and 25% DDR OPP */
+ static u32 has_requested_low = 1;
+
+ if (last_utilization > MALI_LOW_TO_HIGH_LEVEL_UTILIZATION_LIMIT) {
+ if (has_requested_low) {
+ MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u SIGNAL_HIGH\n", last_utilization));
+ /* Request 100% APE_OPP. */
+ if (prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "mali", 100)) {
+ MALI_DEBUG_PRINT(2, ("MALI 100% APE_OPP failed\n"));
+ return;
+ }
+ /*
+ * Since the utilization values will be reported higher
+ * if DDR_OPP is lowered, we also request 100% DDR_OPP.
+ */
+ if (prcmu_qos_add_requirement(PRCMU_QOS_DDR_OPP, "mali", 100)) {
+ MALI_DEBUG_PRINT(2, ("MALI 100% DDR_OPP failed\n"));
+ return;
+ }
+ has_requested_low = 0;
+ }
+ } else {
+ if (last_utilization < MALI_HIGH_TO_LOW_LEVEL_UTILIZATION_LIMIT) {
+ if (!has_requested_low) {
+ /* Remove APE_OPP and DDR_OPP requests */
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "mali");
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP, "mali");
+ MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u SIGNAL_LOW\n", last_utilization));
+ has_requested_low = 1;
+ }
+ }
+ }
+ MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u\n", last_utilization));
+}
+
+_mali_osk_errcode_t mali_platform_init(_mali_osk_resource_t *resource)
+{
+ is_running = 0;
+ last_utilization = 0;
+
+ mali_utilization_workqueue = create_singlethread_workqueue("mali_utilization_workqueue");
+ if (NULL == mali_utilization_workqueue) {
+ MALI_DEBUG_PRINT(2, ("%s: Failed to setup workqueue %s\n", __func__, "v-mali"));
+ goto error;
+ }
+ INIT_WORK(&mali_utilization_work, mali_utilization_function);
+
+ regulator = regulator_get(NULL, "v-mali");
+ if (IS_ERR(regulator)) {
+ MALI_DEBUG_PRINT(2, ("%s: Failed to get regulator %s\n", __func__, "v-mali"));
+ goto error;
+ }
+
+ clk_sga = clk_get_sys("mali", NULL);
+ if (IS_ERR(clk_sga)) {
+ regulator_put(regulator);
+ MALI_DEBUG_PRINT(2, ("%s: Failed to get clock %s\n", __func__, "mali"));
+ goto error;
+ }
+
+ MALI_SUCCESS;
+error:
+ MALI_DEBUG_PRINT(1, ("SGA initialization failed.\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+_mali_osk_errcode_t mali_platform_deinit(_mali_osk_resource_type_t *type)
+{
+ destroy_workqueue(mali_utilization_workqueue);
+ regulator_put(regulator);
+ clk_put(clk_sga);
+ MALI_DEBUG_PRINT(2, ("SGA terminated.\n"));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+ if (is_running) {
+ clk_disable(clk_sga);
+ if (regulator) {
+ int ret = regulator_disable(regulator);
+ if (ret < 0) {
+ MALI_DEBUG_PRINT(2, ("%s: Failed to disable regulator %s\n", __func__, "v-mali"));
+ is_running = 0;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ }
+ is_running = 0;
+ }
+ MALI_DEBUG_PRINT(4, ("mali_platform_powerdown is_running: %u cores: %u\n", is_running, cores));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+ if (!is_running) {
+ int ret = regulator_enable(regulator);
+ if (ret < 0) {
+ MALI_DEBUG_PRINT(2, ("%s: Failed to enable regulator %s\n", __func__, "v-mali"));
+ goto error;
+ }
+
+ ret = clk_enable(clk_sga);
+ if (ret < 0) {
+ regulator_disable(regulator);
+ MALI_DEBUG_PRINT(2, ("%s: Failed to enable clock %s\n", __func__, "mali"));
+ goto error;
+ }
+ is_running = 1;
+ }
+ MALI_DEBUG_PRINT(4, ("mali_platform_powerup is_running:%u cores: %u\n", is_running, cores));
+ MALI_SUCCESS;
+error:
+ MALI_DEBUG_PRINT(1, ("Failed to power up.\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+void mali_gpu_utilization_handler(u32 utilization)
+{
+ last_utilization = utilization;
+ /*
+ * We should not cancel the potentially not yet run old work
+ * in favor of a new work.
+ * Since the utilization value will change,
+ * the mali_utilization_function will evaluate based on
+ * what is the utilization now and not on what it was
+ * when it was scheduled.
+ */
+ queue_work(mali_utilization_workqueue, &mali_utilization_work);
+}
+
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c
new file mode 100644
index 00000000000..b321b504839
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/platform/ux500/ump_kernel_api_hwmem.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Magnus Wendt <magnus.wendt@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "ump_kernel_types.h"
+#include "mali_kernel_common.h"
+
+#include <linux/hwmem.h>
+#include <linux/err.h>
+
+
+/* The UMP kernel API for hwmem has been mapped so that
+ * ump_dd_handle == hwmem_alloc
+ * ump_secure_id == hwmem global name
+ *
+ * The current implementation is limited to contiguous memory
+ */
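+
+/* Example of the mapping (hypothetical name value): a contiguous hwmem
+ * allocation exported with global name 42 is seen by Mali as UMP secure ID 42,
+ * and ump_dd_handle_create_from_secure_id(42) resolves back to the same
+ * struct hwmem_alloc, so no separate UMP bookkeeping is needed.
+ */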
+
+ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
+{
+ int hwmem_name = hwmem_get_name((struct hwmem_alloc *) memh);
+
+ if (unlikely(hwmem_name < 0)) {
+ MALI_DEBUG_PRINT(1, ("%s: Invalid Alloc 0x%x\n",__func__, memh));
+ return UMP_INVALID_SECURE_ID;
+ }
+
+ return (ump_secure_id)hwmem_name;
+}
+
+
+
+ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
+{
+ struct hwmem_alloc *alloc;
+ enum hwmem_mem_type mem_type;
+ enum hwmem_access access;
+
+ alloc = hwmem_resolve_by_name((int) secure_id);
+
+ if (IS_ERR(alloc)) {
+ MALI_DEBUG_PRINT(1, ("%s: Invalid UMP id %d\n",__func__, secure_id));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ hwmem_get_info(alloc, NULL, &mem_type, &access);
+
+ if (unlikely((access & (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | HWMEM_ACCESS_IMPORT)) !=
+ (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | HWMEM_ACCESS_IMPORT))) {
+ MALI_DEBUG_PRINT(1, ("%s: Access denied on UMP id %d, (access==%d)\n",
+ __func__, secure_id, access));
+ hwmem_release(alloc);
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ if (unlikely(HWMEM_MEM_CONTIGUOUS_SYS != mem_type)) {
+ MALI_DEBUG_PRINT(1, ("%s: UMP id %d is non-contiguous! (not supported)\n",
+ __func__, secure_id));
+ hwmem_release(alloc);
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ return (ump_dd_handle)alloc;
+}
+
+
+
+unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
+{
+ return 1;
+}
+
+
+
+ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh,
+ ump_dd_physical_block * blocks,
+ unsigned long num_blocks)
+{
+ struct hwmem_mem_chunk hwmem_mem_chunk;
+ size_t hwmem_mem_chunk_length = 1;
+
+ int hwmem_result;
+ struct hwmem_alloc *alloc = (struct hwmem_alloc *)memh;
+
+ if (unlikely(blocks == NULL)) {
+ MALI_DEBUG_PRINT(1, ("%s: blocks == NULL\n",__func__));
+ return UMP_DD_INVALID;
+ }
+
+ if (unlikely(1 != num_blocks)) {
+ MALI_DEBUG_PRINT(1, ("%s: num_blocks == %d (!= 1)\n",__func__, num_blocks));
+ return UMP_DD_INVALID;
+ }
+
+ MALI_DEBUG_PRINT(5, ("Returning physical block information. Alloc: 0x%x\n", memh));
+
+ /* It might not look natural to pin here, but it matches the usage by the mali kernel module */
+ hwmem_result = hwmem_pin(alloc, &hwmem_mem_chunk, &hwmem_mem_chunk_length);
+
+ if (unlikely(hwmem_result < 0)) {
+ MALI_DEBUG_PRINT(1, ("%s: Pin failed. Alloc: 0x%x\n",__func__, memh));
+ return UMP_DD_INVALID;
+ }
+
+ blocks[0].addr = hwmem_mem_chunk.paddr;
+ blocks[0].size = hwmem_mem_chunk.size;
+
+ hwmem_set_domain(alloc, HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
+ HWMEM_DOMAIN_SYNC, NULL);
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh,
+ unsigned long index,
+ ump_dd_physical_block * block)
+{
+ if (unlikely(0 != index)) {
+ MALI_DEBUG_PRINT(1, ("%s: index == %d (!= 0)\n",__func__, index));
+ return UMP_DD_INVALID;
+ }
+ return ump_dd_phys_blocks_get(memh, block, 1);
+}
+
+
+
+unsigned long ump_dd_size_get(ump_dd_handle memh)
+{
+ struct hwmem_alloc *alloc = (struct hwmem_alloc *)memh;
+ int size;
+
+ hwmem_get_info(alloc, &size, NULL, NULL);
+
+ return size;
+}
+
+
+
+void ump_dd_reference_add(ump_dd_handle memh)
+{
+	/* This is never called from the mali kernel driver */
+}
+
+
+
+void ump_dd_reference_release(ump_dd_handle memh)
+{
+ struct hwmem_alloc *alloc = (struct hwmem_alloc *)memh;
+
+ hwmem_unpin(alloc);
+ hwmem_release(alloc);
+
+ return;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt
new file mode 100644
index 00000000000..af40fddd20f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/readme.txt
@@ -0,0 +1,28 @@
+Building the Mali Device Driver for Linux
+-----------------------------------------
+
+Build the Mali Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> USING_MMU=<mmu_option> USING_UMP=<ump_option> \
+BUILD=<build_option> CONFIG=<your_config> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ mmu_option: 1 = Mali MMU(s) on
+ 0 = Mali MMU(s) off
+ ump_option: 1 = Enable UMP support(*)
+          0 = Disable UMP support
+ build_option: debug = debug build of driver
+ release = release build of driver
+ your_config: Name of the sub-folder to find the required config.h(**) file
+ ("arch-" will be prepended)
+
+(*) For newer Linux Kernels, the Module.symvers file for the UMP device driver
+ must be available. The UMP_SYMVERS_FILE variable in the Makefile should
+ point to this file. This file is generated when the UMP driver is built.
+
+(**) The config.h file contains the configuration parameters needed, like where the
+ Mali GPU is located, interrupts it uses, memory and so on.
+
+The result will be a mali.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
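+
+Example (paths are placeholders), building a release driver with MMU and
+UMP support for the ux500 configuration:
+
+KDIR=/path/to/linux USING_MMU=1 USING_UMP=1 \
+BUILD=release CONFIG=ux500 make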
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h
new file mode 100644
index 00000000000..6b30802bb2d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_200_regs.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ * Enum for management register addresses.
+ */
+enum mali200_mgmt_reg
+{
+ MALI200_REG_ADDR_MGMT_VERSION = 0x1000,
+ MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x1004,
+ MALI200_REG_ADDR_MGMT_STATUS = 0x1008,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT = 0x100c,
+
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x1020,
+ MALI200_REG_ADDR_MGMT_INT_CLEAR = 0x1024,
+ MALI200_REG_ADDR_MGMT_INT_MASK = 0x1028,
+ MALI200_REG_ADDR_MGMT_INT_STATUS = 0x102c,
+
+ MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW = 0x1044,
+
+ MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x1050,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x1080,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x1084,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x108c,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x10a0,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x10a4,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x10ac,
+
+ MALI200_REG_SIZEOF_REGISTER_BANK = 0x10f0
+
+};
+
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+enum mali200_mgmt_ctrl_mgmt {
+ MALI200_REG_VAL_CTRL_MGMT_STOP_BUS = (1<<0),
+#if defined(USING_MALI200)
+ MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES = (1<<3),
+#endif
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET = (1<<5),
+ MALI200_REG_VAL_CTRL_MGMT_START_RENDERING = (1<<6),
+#if defined(USING_MALI400)
+ MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET = (1<<7),
+#endif
+};
+
+enum mali200_mgmt_irq {
+ MALI200_REG_VAL_IRQ_END_OF_FRAME = (1<<0),
+ MALI200_REG_VAL_IRQ_END_OF_TILE = (1<<1),
+ MALI200_REG_VAL_IRQ_HANG = (1<<2),
+ MALI200_REG_VAL_IRQ_FORCE_HANG = (1<<3),
+ MALI200_REG_VAL_IRQ_BUS_ERROR = (1<<4),
+ MALI200_REG_VAL_IRQ_BUS_STOP = (1<<5),
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT = (1<<6),
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT = (1<<7),
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR = (1<<8),
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1<<9),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW = (1<<10),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW = (1<<11),
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED = (1<<12),
+};
+
+#if defined USING_MALI200
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR))
+#elif defined USING_MALI400
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW |\
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+#else
+#error "No supported mali core defined"
+#endif
+
+#if defined USING_MALI200
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR))
+#elif defined USING_MALI400
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+#else
+#error "No supported mali core defined"
+#endif
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
+
+enum mali200_mgmt_status {
+ MALI200_REG_VAL_STATUS_RENDERING_ACTIVE = (1<<0),
+ MALI200_REG_VAL_STATUS_BUS_STOPPED = (1<<4),
+};
+
+enum mali200_render_unit
+{
+ MALI200_REG_ADDR_FRAME = 0x0000,
+};
+
+#if defined USING_MALI200
+#define MALI200_NUM_REGS_FRAME ((0x04C/4)+1)
+#elif defined USING_MALI400
+#define MALI200_NUM_REGS_FRAME ((0x058/4)+1)
+#else
+#error "No supported mali core defined"
+#endif
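+/* For example, with USING_MALI400 the last frame register is at byte
+ * offset 0x58, giving 0x58/4 + 1 = 23 registers. */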
+
+enum mali200_wb_unit {
+ MALI200_REG_ADDR_WB0 = 0x0100,
+ MALI200_REG_ADDR_WB1 = 0x0200,
+ MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+/** The number of registers in one single writeback unit */
+#ifndef MALI200_NUM_REGS_WBx
+#define MALI200_NUM_REGS_WBx ((0x02C/4)+1)
+#endif
+
+/* This should be in the top 16 bits of the version register of Mali PP */
+#if defined USING_MALI200
+#define MALI_PP_PRODUCT_ID 0xC807
+#elif defined USING_MALI400
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI_PP_PRODUCT_ID MALI400_PP_PRODUCT_ID
+#else
+#error "No supported mali core defined"
+#endif
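+
+/* A hedged sketch of a product-id check; the read_mali_reg() helper is
+ * hypothetical and stands in for whatever register accessor the core
+ * code provides:
+ *
+ *   u32 version = read_mali_reg(MALI200_REG_ADDR_MGMT_VERSION);
+ *   if ((version >> 16) == MALI_PP_PRODUCT_ID)
+ *           the core is recognised as a supported PP
+ */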
+
+
+#endif /* _MALI200_REGS_H_ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h
new file mode 100644
index 00000000000..11eb55c424e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/regs/mali_gp_regs.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALIGP2_CONTROL_REGS_H_
+#define _MALIGP2_CONTROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * They are used to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are byte offsets, aligned to 32-bit words.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+typedef enum {
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR = 0x00,
+ MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR = 0x04,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR = 0x08,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR = 0x0c,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR = 0x10,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR = 0x14,
+ MALIGP2_REG_ADDR_MGMT_CMD = 0x20,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT = 0x24,
+ MALIGP2_REG_ADDR_MGMT_INT_CLEAR = 0x28,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK = 0x2C,
+ MALIGP2_REG_ADDR_MGMT_INT_STAT = 0x30,
+ MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW = 0x34,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x3C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x40,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x44,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x48,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x4C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x50,
+ MALIGP2_REG_ADDR_MGMT_STATUS = 0x68,
+ MALIGP2_REG_ADDR_MGMT_VERSION = 0x6C,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ = 0x80,
+ MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ = 0x84,
+ MALIGP2_CONTR_AXI_BUS_ERROR_STAT = 0x94,
+ MALIGP2_REGISTER_ADDRESS_SPACE_SIZE = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ * @see MALIGP2_CTRL_REG_CMD
+ */
+typedef enum
+{
+ MALIGP2_REG_VAL_CMD_START_VS = (1<< 0),
+ MALIGP2_REG_VAL_CMD_START_PLBU = (1<< 1),
+ MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC = (1<< 4),
+ MALIGP2_REG_VAL_CMD_RESET = (1<< 5),
+ MALIGP2_REG_VAL_CMD_FORCE_HANG = (1<< 6),
+ MALIGP2_REG_VAL_CMD_STOP_BUS = (1<< 9),
+#if defined(USING_MALI400)
+ MALI400GP_REG_VAL_CMD_SOFT_RESET = (1<<10),
+#endif
+} mgp_contr_reg_val_cmd;
+
+
+/** @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ * @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ * MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR (1 << 11)
+#if defined USING_MALI400
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS (1 << 22)
+#elif !defined USING_MALI200
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining all IRQs in MaliGP2 */
+#if defined USING_MALI200
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR)
+#elif defined USING_MALI400
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+#else
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining the IRQs in MaliGP2 which we use */
+#if defined USING_MALI200
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR)
+#elif defined USING_MALI400
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+#else
+#error "No supported mali core defined"
+#endif
+
+/* Mask with no MaliGP2 IRQs set */
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** @} end of MALIGP2_IRQ */
+
+/** @defgroup MALIGP2_STATUS
+ * The different Status values to the geometry processor.
+ * @see MALIGP2_CTRL_REG_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE 0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED 0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE 0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR 0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR 0x0100
+/** @} end of MALIGP2_STATUS */
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+ MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+ MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+ MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+ MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
+
+/* This should be in the top 16 bits of the version register of the GP. */
+#if defined(USING_MALI200)
+#define MALI_GP_PRODUCT_ID 0xA07
+#elif defined(USING_MALI400)
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI_GP_PRODUCT_ID MALI400_GP_PRODUCT_ID
+#else
+#error "No supported mali core defined"
+#endif
+
+/**
+ * The different sources for the performance counters on the geometry processor.
+ * @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+enum MALIGP2_cont_reg_perf_cnt_src {
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c
new file mode 100644
index 00000000000..a6b1d76d567
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h
new file mode 100644
index 00000000000..96b709bc166
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-arm11-cc/mali_timestamp.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ /*
+ * reset counters and overflow flags
+ */
+
+ u32 mask = (1 << 0) | /* enable all three counters */
+ (0 << 1) | /* reset both Count Registers to 0x0 */
+ (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+ (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+ (0 << 4) | /* Count Register 0 interrupt enable */
+ (0 << 5) | /* Count Register 1 interrupt enable */
+ (0 << 6) | /* Cycle Counter interrupt enable */
+ (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+ (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+ (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+ __asm__ __volatile__ ("MCR p15, 0, %0, c15, c12, 0" : : "r" (mask) );
+
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ u32 result;
+
+ /* this is for the clock cycles */
+ __asm__ __volatile__ ("MRC p15, 0, %0, c15, c12, 1" : "=r" (result));
+
+ return (u64)result;
+}
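+
+/* A minimal usage sketch (hypothetical caller): reset once, then sample
+ * around the region of interest. Note that the cycle counter is only 32
+ * bits wide, so long intervals can wrap:
+ *
+ *   _mali_timestamp_reset();
+ *   t0 = _mali_timestamp_get();
+ *   ... work to be measured ...
+ *   t1 = _mali_timestamp_get();
+ *   cycles = t1 - t0;
+ */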
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c
new file mode 100644
index 00000000000..a6b1d76d567
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h
new file mode 100644
index 00000000000..11031d675ca
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/timestamp-default/mali_timestamp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ return _mali_osk_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile
new file mode 100644
index 00000000000..97225bbbf0b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile
@@ -0,0 +1,115 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+UMP_FILE_PREFIX =
+UDD_FILE_PREFIX = ../mali/
+
+ifneq ($(KBUILD_EXTMOD),)
+include $(KBUILD_EXTMOD)/Makefile.common
+else
+include ./Makefile.common
+endif
+
+# For each arch, check: CROSS_COMPILE, KDIR, CFLAGS += -DARCH
+
+ARCH ?= arm
+## @note This should be overridable so UMP can be built without debug:
+EXTRA_CFLAGS += -DDEBUG -DMALI_STATE_TRACKING=0
+
+# linux build system integration
+
+ifneq ($(KERNELRELEASE),)
+# Inside the kernel build system
+
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD) -I$(KBUILD_EXTMOD)/common -I$(KBUILD_EXTMOD)/linux -I$(KBUILD_EXTMOD)/../mali/common -I$(KBUILD_EXTMOD)/../mali/linux -I$(KBUILD_EXTMOD)/../../ump/include/ump
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+ifeq ($(wildcard $(KBUILD_EXTMOD)/linux/license/gpl/*),)
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD)/linux/license/proprietary
+else
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD)/linux/license/gpl
+endif
+
+SRC += $(UMP_FILE_PREFIX)linux/ump_kernel_linux.c \
+ $(UMP_FILE_PREFIX)linux/ump_kernel_memory_backend_os.c \
+ $(UMP_FILE_PREFIX)linux/ump_kernel_memory_backend_dedicated.c \
+ $(UMP_FILE_PREFIX)linux/ump_memory_backend.c \
+ $(UMP_FILE_PREFIX)linux/ump_ukk_wrappers.c \
+ $(UMP_FILE_PREFIX)linux/ump_ukk_ref_wrappers.c \
+ $(UMP_FILE_PREFIX)linux/ump_osk_atomics.c \
+ $(UMP_FILE_PREFIX)linux/ump_osk_low_level_mem.c \
+ $(UMP_FILE_PREFIX)linux/ump_osk_misc.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_atomics.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_locks.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_memory.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_math.c \
+ $(UDD_FILE_PREFIX)linux/mali_osk_misc.c
+
+# Selecting files to compile by parsing the config file
+
+MODULE:=ump.ko
+
+obj-m := $(MODULE:.ko=.o)
+$(MODULE:.ko=-y) := $(SRC:.c=.o)
+
+else
+# Outside the kernel build system
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+ifeq ($(ARCH), arm)
+ # when compiling for ARM we're cross compiling
+ export CROSS_COMPILE ?= arm-none-linux-gnueabi-
+ # default to Virtex5
+ CONFIG ?= pb-virtex5
+else
+ # Compiling for the host
+ CONFIG ?= $(shell uname -m)
+endif
+
+# default cpu to select
+CPU ?= ct11mp
+
+# look up KDIR based on CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+# Validate selected config
+ifneq ($(shell [ -d arch-$(CONFIG) ] && [ -f arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(shell pwd))
+$(error No configuration found for config $(CONFIG). Check that arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L arch ] && rm arch)
+$(shell ln -sf arch-$(CONFIG) arch)
+$(shell touch arch/config.h)
+endif
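+
+# Example out-of-tree invocation (placeholder paths; assumes the ARM
+# cross-toolchain is on PATH):
+#   make CONFIG=pb-virtex5 KDIR=/path/to/kernel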
+
+all:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+
+kernelrelease:
+ $(MAKE) -C $(KDIR) kernelrelease
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR)/../mali clean
+
+endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common
new file mode 100644
index 00000000000..f797c85a6f2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/Makefile.common
@@ -0,0 +1,19 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+SRC = $(UMP_FILE_PREFIX)common/ump_kernel_common.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_descriptor_mapping.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_api.c \
+ $(UMP_FILE_PREFIX)common/ump_kernel_ref_drv.c
+
+# Get the subversion revision number; fall back to MALI_RELEASE_NAME if no svn info is available
+SVN_REV:=$(shell ((svnversion | grep -qvE '(exported|Unversioned)' && echo -n 'Revision: ' && svnversion) || git svn info | sed -e 's/$$$$/M/' | grep '^Revision: ' || echo ${MALI_RELEASE_NAME}) 2>/dev/null | sed -e 's/^Revision: //')
+EXTRA_CFLAGS += -DSVN_REV=$(SVN_REV)
+EXTRA_CFLAGS += -DSVN_REV_STRING=\"$(SVN_REV)\"
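+# The net effect is, e.g. (revision number illustrative only):
+#   EXTRA_CFLAGS += -DSVN_REV=1234 -DSVN_REV_STRING=\"1234\"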
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h
new file mode 100644
index 00000000000..38ae1eeb3c2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/arch-pb-virtex5/config.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 0
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0xCE000000
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 32UL * 1024UL * 1024UL
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c
new file mode 100644
index 00000000000..719370c3de7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_api.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_common.h"
+
+
+
+/* ---------------- UMP kernel space API functions follows ---------------- */
+
+
+
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ DBG_MSG(5, ("Returning secure ID. ID: %u\n", mem->secure_id));
+
+ return mem->secure_id;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
+{
+ ump_dd_mem * mem;
+
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
+ if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ ump_dd_reference_add(mem);
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ return (ump_dd_handle)mem;
+}
+
+
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*) memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ return mem->nr_blocks;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ if (blocks == NULL)
+ {
+ DBG_MSG(1, ("NULL parameter in ump_dd_phys_blocks_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ if (mem->nr_blocks != num_blocks)
+ {
+ DBG_MSG(1, ("Specified number of blocks do not match actual number of blocks\n"));
+ return UMP_DD_INVALID;
+ }
+
+ DBG_MSG(5, ("Returning physical block information. ID: %u\n", mem->secure_id));
+
+ _mali_osk_memcpy(blocks, mem->block_array, sizeof(ump_dd_physical_block) * mem->nr_blocks);
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block * block)
+{
+ ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ if (block == NULL)
+ {
+ DBG_MSG(1, ("NULL parameter in ump_dd_phys_block_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ if (index >= mem->nr_blocks)
+ {
+ DBG_MSG(5, ("Invalid index specified in ump_dd_phys_block_get()\n"));
+ return UMP_DD_INVALID;
+ }
+
+ DBG_MSG(5, ("Returning physical block information. ID: %u, index: %lu\n", mem->secure_id, index));
+
+ *block = mem->block_array[index];
+
+ return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ DBG_MSG(5, ("Returning size. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+ return mem->size_bytes;
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
+{
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+ int new_ref;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+ new_ref = _ump_osk_atomic_inc_and_read(&mem->ref_count);
+
+ DBG_MSG(4, ("Memory reference incremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
+{
+ int new_ref;
+ ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+ DEBUG_ASSERT_POINTER(mem);
+
+	/* We must hold this mutex while doing the atomic_dec_and_read, to ensure
+	that elements in the ump_descriptor_mapping table are always valid. If they
+	are not, userspace may accidentally map in this secure_id right before it is
+	freed, giving a mapped backdoor into unallocated memory.*/
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
+
+ DBG_MSG(4, ("Memory reference decremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+
+ if (0 == new_ref)
+ {
+ DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
+
+ ump_descriptor_mapping_free(device.secure_id_map, (int)mem->secure_id);
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ mem->release_func(mem->ctx, mem);
+ _mali_osk_free(mem);
+ }
+ else
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
+
+
+
+/* --------------- Handling of user space requests follows --------------- */
+
+
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args )
+{
+ ump_session_data * session_data;
+
+ DEBUG_ASSERT_POINTER( args );
+ DEBUG_ASSERT_POINTER( args->ctx );
+
+ session_data = (ump_session_data *)args->ctx;
+
+	/* check compatibility */
+ if (args->version == UMP_IOCTL_API_VERSION)
+ {
+ DBG_MSG(3, ("API version set to newest %d (compatible)\n", GET_VERSION(args->version)));
+ args->compatible = 1;
+ session_data->api_version = args->version;
+ }
+ else if (args->version == MAKE_VERSION_ID(1))
+ {
+ DBG_MSG(2, ("API version set to depricated: %d (compatible)\n", GET_VERSION(args->version)));
+ args->compatible = 1;
+ session_data->api_version = args->version;
+ }
+ else
+ {
+ DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n", GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
+ args->compatible = 0;
+ args->version = UMP_IOCTL_API_VERSION; /* report our version */
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
+{
+ ump_session_memory_list_element * session_memory_element;
+ ump_session_memory_list_element * tmp;
+ ump_session_data * session_data;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
+ int secure_id;
+
+ DEBUG_ASSERT_POINTER( release_info );
+ DEBUG_ASSERT_POINTER( release_info->ctx );
+
+	/* Retrieve the session data */
+ session_data = (ump_session_data*)release_info->ctx;
+
+ /* If there are many items in the memory session list we
+ * could be de-referencing this pointer a lot so keep a local copy
+ */
+ secure_id = release_info->secure_id;
+
+ DBG_MSG(4, ("Releasing memory with IOCTL, ID: %u\n", secure_id));
+
+ /* Iterate through the memory list looking for the requested secure ID */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+ {
+ if ( session_memory_element->mem->secure_id == secure_id)
+ {
+ ump_dd_mem *release_mem;
+
+ release_mem = session_memory_element->mem;
+ _mali_osk_list_del(&session_memory_element->list);
+ ump_dd_reference_release(release_mem);
+ _mali_osk_free(session_memory_element);
+
+ ret = _MALI_OSK_ERR_OK;
+ break;
+ }
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG_IF(1, _MALI_OSK_ERR_OK != ret, ("UMP memory with ID %u does not belong to this session.\n", secure_id));
+
+ DBG_MSG(4, ("_ump_ukk_release() returning 0x%x\n", ret));
+ return ret;
+}
+
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction )
+{
+ ump_dd_mem * mem;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+ DEBUG_ASSERT_POINTER( user_interaction );
+
+ /* We lock the mappings so things don't get removed while we are looking for the memory */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)user_interaction->secure_id, (void**)&mem))
+ {
+ user_interaction->size = mem->size_bytes;
+ DBG_MSG(4, ("Returning size. ID: %u, size: %lu ", (ump_secure_id)user_interaction->secure_id, (unsigned long)user_interaction->size));
+ ret = _MALI_OSK_ERR_OK;
+ }
+ else
+ {
+ user_interaction->size = 0;
+ DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n", (ump_secure_id)user_interaction->secure_id));
+ }
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ return ret;
+}
+
+
+
+void _ump_ukk_msync( _ump_uk_msync_s *args )
+{
+ ump_dd_mem * mem = NULL;
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (NULL==mem)
+ {
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n", (ump_secure_id)args->secure_id));
+ return;
+ }
+
+ /* Returns the cache settings back to Userspace */
+ args->is_cached=mem->is_cached;
+
+ /* If this flag is the only one set, we should not do the actual flush, only the readout */
+ if ( _UMP_UK_MSYNC_READOUT_CACHE_ENABLED==args->op )
+ {
+ DBG_MSG(3, ("_ump_ukk_msync READOUT ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
+ return;
+ }
+
+	/* Nothing to do if the memory is not cached */
+ if ( 0==mem->is_cached )
+ {
+ DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
+		return;
+ }
+ DBG_MSG(3, ("_ump_ukk_msync FLUSHING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
+
+ /* The actual cache flush - Implemented for each OS*/
+ _ump_osk_msync( mem , args->op);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c
new file mode 100644
index 00000000000..b99c3e7c7d2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+/**
+ * Define the initial and maximum size of number of secure_ids on the system
+ */
+#define UMP_SECURE_ID_TABLE_ENTRIES_INITIAL (128)
+#define UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM (4096)
+
+
+/**
+ * Define the initial and maximum size of the ump_session_data::cookies_map,
+ * which is a \ref ump_descriptor_mapping. This limits how many secure_ids
+ * may be mapped into a particular process using _ump_ukk_map_mem().
+ */
+
+#define UMP_COOKIES_PER_SESSION_INITIAL (UMP_SECURE_ID_TABLE_ENTRIES_INITIAL )
+#define UMP_COOKIES_PER_SESSION_MAXIMUM (UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM)
+
+struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void)
+{
+ _mali_osk_errcode_t err;
+
+ /* Perform OS Specific initialization */
+ err = _ump_osk_init();
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("Failed to initiaze the UMP Device Driver"));
+ return err;
+ }
+
+ /* Init the global device */
+ _mali_osk_memset(&device, 0, sizeof(device) );
+
+ /* Create the descriptor map, which will be used for mapping secure ID to ump_dd_mem structs */
+ device.secure_id_map_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0 , 0);
+ if (NULL == device.secure_id_map_lock)
+ {
+ MSG_ERR(("Failed to create OSK lock for secure id lookup table\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ device.secure_id_map = ump_descriptor_mapping_create(UMP_SECURE_ID_TABLE_ENTRIES_INITIAL, UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM);
+ if (NULL == device.secure_id_map)
+ {
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ MSG_ERR(("Failed to create secure id lookup table\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Init memory backend */
+ device.backend = ump_memory_backend_create();
+ if (NULL == device.backend)
+ {
+ MSG_ERR(("Failed to create memory backend\n"));
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ ump_descriptor_mapping_destroy(device.secure_id_map);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void ump_kernel_destructor(void)
+{
+ DEBUG_ASSERT_POINTER(device.secure_id_map);
+ DEBUG_ASSERT_POINTER(device.secure_id_map_lock);
+
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ device.secure_id_map_lock = NULL;
+
+ ump_descriptor_mapping_destroy(device.secure_id_map);
+ device.secure_id_map = NULL;
+
+ device.backend->shutdown(device.backend);
+ device.backend = NULL;
+
+ ump_memory_backend_destroy();
+
+ _ump_osk_term();
+}
+
+/** Creates a new UMP session
+ */
+_mali_osk_errcode_t _ump_ukk_open( void** context )
+{
+ struct ump_session_data * session_data;
+
+ /* allocated struct to track this session */
+ session_data = (struct ump_session_data *)_mali_osk_malloc(sizeof(struct ump_session_data));
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Failed to allocate ump_session_data in ump_file_open()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0);
+ if( NULL == session_data->lock )
+ {
+ MSG_ERR(("Failed to initialize lock for ump_session_data in ump_file_open()\n"));
+ _mali_osk_free(session_data);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->cookies_map = ump_descriptor_mapping_create( UMP_COOKIES_PER_SESSION_INITIAL, UMP_COOKIES_PER_SESSION_MAXIMUM );
+
+ if ( NULL == session_data->cookies_map )
+ {
+ MSG_ERR(("Failed to create descriptor mapping for _ump_ukk_map_mem cookies\n"));
+
+ _mali_osk_lock_term( session_data->lock );
+ _mali_osk_free( session_data );
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_list);
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_mappings_list);
+
+	/* Since the initial version of the UMP interface did not use the API_VERSION ioctl,
+	we have to assume that it is that version, and not the "latest" one (UMP_IOCTL_API_VERSION).
+	Current and later API versions make an additional call to this IOCTL and update this
+	variable to the correct one. */
+ session_data->api_version = MAKE_VERSION_ID(1);
+
+ *context = (void*)session_data;
+
+ DBG_MSG(2, ("New session opened\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_close( void** context )
+{
+ struct ump_session_data * session_data;
+ ump_session_memory_list_element * item;
+ ump_session_memory_list_element * tmp;
+
+ session_data = (struct ump_session_data *)*context;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_close()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ /* Unmap any descriptors mapped in. */
+ if (0 == _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list))
+ {
+ ump_memory_allocation *descriptor;
+ ump_memory_allocation *temp;
+
+ DBG_MSG(1, ("Memory mappings found on session usage list during session termination\n"));
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->list_head_session_memory_mappings_list, ump_memory_allocation, list)
+ {
+ _ump_uk_unmap_mem_s unmap_args;
+ DBG_MSG(4, ("Freeing block with phys address 0x%x size 0x%x mapped in user space at 0x%x\n",
+ descriptor->phys_addr, descriptor->size, descriptor->mapping));
+ unmap_args.ctx = (void*)session_data;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.size = descriptor->size;
+ unmap_args._ukk_private = NULL; /* NOTE: unused */
+ unmap_args.cookie = descriptor->cookie;
+
+ /* NOTE: This modifies the list_head_session_memory_mappings_list */
+ _ump_ukk_unmap_mem( &unmap_args );
+ }
+ }
+
+ /* ASSERT that we really did free everything, because _ump_ukk_unmap_mem()
+ * can fail silently. */
+ DEBUG_ASSERT( _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list) );
+
+ _MALI_OSK_LIST_FOREACHENTRY(item, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+ {
+ _mali_osk_list_del(&item->list);
+ DBG_MSG(2, ("Releasing UMP memory %u as part of file close\n", item->mem->secure_id));
+ ump_dd_reference_release(item->mem);
+ _mali_osk_free(item);
+ }
+
+ ump_descriptor_mapping_destroy( session_data->cookies_map );
+
+ _mali_osk_lock_term(session_data->lock);
+ _mali_osk_free(session_data);
+
+ DBG_MSG(2, ("Session closed\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
+{
+ struct ump_session_data * session_data;
+ ump_memory_allocation * descriptor; /* Describes current mapping of memory */
+ _mali_osk_errcode_t err;
+ unsigned long offset = 0;
+ unsigned long left;
+ ump_dd_handle handle; /* The real UMP handle for this memory. Its real datatype is ump_dd_mem* */
+ ump_dd_mem * mem; /* The real UMP memory. It is equal to the handle, but with exposed struct */
+ u32 block;
+ int map_id;
+
+ session_data = (ump_session_data *)args->ctx;
+ if( NULL == session_data )
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ descriptor = (ump_memory_allocation*) _mali_osk_calloc( 1, sizeof(ump_memory_allocation));
+ if (NULL == descriptor)
+ {
+ MSG_ERR(("ump_ukk_map_mem: descriptor allocation failed\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ handle = ump_dd_handle_create_from_secure_id(args->secure_id);
+ if ( UMP_DD_HANDLE_INVALID == handle)
+ {
+ _mali_osk_free(descriptor);
+ DBG_MSG(1, ("Trying to map unknown secure ID %u\n", args->secure_id));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mem = (ump_dd_mem*)handle;
+ DEBUG_ASSERT(mem);
+ if (mem->size_bytes != args->size)
+ {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("Trying to map too much or little. ID: %u, virtual size=%lu, UMP size: %lu\n", args->secure_id, args->size, mem->size_bytes));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ map_id = ump_descriptor_mapping_allocate_mapping( session_data->cookies_map, (void*) descriptor );
+
+ if (map_id < 0)
+ {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("ump_ukk_map_mem: unable to allocate a descriptor_mapping for return cookie\n"));
+
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ descriptor->size = args->size;
+ descriptor->handle = handle;
+ descriptor->phys_addr = args->phys_addr;
+ descriptor->process_mapping_info = args->_ukk_private;
+ descriptor->ump_session = session_data;
+ descriptor->cookie = (u32)map_id;
+
+ if ( mem->is_cached )
+ {
+ descriptor->is_cached = 1;
+ args->is_cached = 1;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as cached.\n", args->secure_id));
+ }
+ else
+ {
+ descriptor->is_cached = 0;
+ args->is_cached = 0;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as Uncached.\n", args->secure_id));
+ }
+
+ _mali_osk_list_init( &descriptor->list );
+
+ err = _ump_osk_mem_mapregion_init( descriptor );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ DBG_MSG(1, ("Failed to initialize memory mapping in _ump_ukk_map_mem(). ID: %u\n", args->secure_id));
+ ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(mem);
+ return err;
+ }
+
+ DBG_MSG(4, ("Mapping virtual to physical memory: ID: %u, size:%lu, first physical addr: 0x%08lx, number of regions: %lu\n",
+ mem->secure_id,
+ mem->size_bytes,
+ ((NULL != mem->block_array) ? mem->block_array->addr : 0),
+ mem->nr_blocks));
+
+ left = descriptor->size;
+ /* loop over all blocks and map them in */
+ for (block = 0; block < mem->nr_blocks; block++)
+ {
+ unsigned long size_to_map;
+
+ if (left > mem->block_array[block].size)
+ {
+ size_to_map = mem->block_array[block].size;
+ }
+ else
+ {
+ size_to_map = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != _ump_osk_mem_mapregion_map(descriptor, offset, (u32 *)&(mem->block_array[block].addr), size_to_map ) )
+ {
+ DBG_MSG(1, ("WARNING: _ump_ukk_map_mem failed to map memory into userspace\n"));
+ ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ ump_dd_reference_release(mem);
+ _ump_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ left -= size_to_map;
+ offset += size_to_map;
+ }
+
+ /* Add to the ump_memory_allocation tracking list */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_add( &descriptor->list, &session_data->list_head_session_memory_mappings_list );
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->mapping = descriptor->mapping;
+ args->cookie = descriptor->cookie;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args )
+{
+ struct ump_session_data * session_data;
+ ump_memory_allocation * descriptor;
+ ump_dd_handle handle;
+
+ session_data = (ump_session_data *)args->ctx;
+
+ if( NULL == session_data )
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return;
+ }
+
+ if (0 != ump_descriptor_mapping_get( session_data->cookies_map, (int)args->cookie, (void**)&descriptor) )
+ {
+ MSG_ERR(("_ump_ukk_map_mem: cookie 0x%X not found for this session\n", args->cookie ));
+ return;
+ }
+
+ DEBUG_ASSERT_POINTER(descriptor);
+
+ handle = descriptor->handle;
+ if ( UMP_DD_HANDLE_INVALID == handle)
+ {
+ DBG_MSG(1, ("WARNING: Trying to unmap unknown handle: UNKNOWN\n"));
+ return;
+ }
+
+ /* Remove the ump_memory_allocation from the list of tracked mappings */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_del( &descriptor->list );
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ ump_descriptor_mapping_free( session_data->cookies_map, (int)args->cookie );
+
+ ump_dd_reference_release(handle);
+
+ _ump_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h
new file mode 100644
index 00000000000..0c55b14bc94
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_common.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_H__
+#define __UMP_KERNEL_H__
+
+#include "ump_kernel_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+#ifdef DEBUG
+ extern int ump_debug_level;
+ #define UMP_DEBUG_PRINT(args) _mali_osk_dbgmsg args
+ #define UMP_DEBUG_CODE(args) args
+ #define DBG_MSG(level,args) do { /* args should be in brackets */ \
+ ((level) <= ump_debug_level)?\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")), \
+ UMP_DEBUG_PRINT(args):0; \
+ } while (0)
+
+ #define DBG_MSG_IF(level,condition,args) /* args should be in brackets */ \
+ if((condition)&&((level) <= ump_debug_level)) {\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+ #define DBG_MSG_ELSE(level,args) /* args should be in brackets */ \
+ else if((level) <= ump_debug_level) { \
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+ #define DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) MSG_ERR(("NULL pointer " #pointer)); } while(0)
+ #define DEBUG_ASSERT(condition) do {if(!(condition)) MSG_ERR(("ASSERT failed: " #condition)); } while(0)
+#else /* DEBUG */
+ #define UMP_DEBUG_PRINT(args) do {} while(0)
+ #define UMP_DEBUG_CODE(args)
+ #define DBG_MSG(level,args) do {} while(0)
+ #define DBG_MSG_IF(level,condition,args) do {} while(0)
+ #define DBG_MSG_ELSE(level,args) do {} while(0)
+ #define DEBUG_ASSERT(condition) do {} while(0)
+ #define DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#endif /* DEBUG */
+
+#define MSG_ERR(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \
+ _mali_osk_dbgmsg( " %s()%4d\n", __FUNCTION__, __LINE__) ; \
+ _mali_osk_dbgmsg args ; \
+ _mali_osk_dbgmsg("\n"); \
+ } while(0)
+
+#define MSG(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: "); \
+ _mali_osk_dbgmsg args; \
+ } while (0)
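+
+/* Usage note: the args parameter to these macros is a complete
+ * printf-style argument list in its own parentheses, e.g.
+ *
+ *   DBG_MSG(2, ("Mapped ID %u at 0x%x\n", id, addr));
+ */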
+
+
+
+/*
+ * This struct is used to store per session data.
+ * A session is created when someone open() the device, and
+ * closed when someone close() it or the user space application terminates.
+ */
+typedef struct ump_session_data
+{
+ _mali_osk_list_t list_head_session_memory_list; /**< List of ump allocations made by the process (elements are ump_session_memory_list_element) */
+ _mali_osk_list_t list_head_session_memory_mappings_list; /**< List of ump_memory_allocations mapped in */
+ int api_version;
+ _mali_osk_lock_t * lock;
+ ump_descriptor_mapping * cookies_map; /**< Secure mapping of cookies from _ump_ukk_map_mem() */
+} ump_session_data;
+
+
+
+/*
+ * This struct is used to track the UMP memory references a session has.
+ * We need to track this in order to be able to clean up after user space processes
+ * which don't do it themselves (e.g. due to a crash or premature termination).
+ */
+typedef struct ump_session_memory_list_element
+{
+ struct ump_dd_mem * mem;
+ _mali_osk_list_t list;
+} ump_session_memory_list_element;
+
+
+
+/*
+ * Device specific data, created when device driver is loaded, and then kept as the global variable device.
+ */
+typedef struct ump_dev
+{
+ _mali_osk_lock_t * secure_id_map_lock;
+ ump_descriptor_mapping * secure_id_map;
+ ump_memory_backend * backend;
+} ump_dev;
+
+
+
+extern int ump_debug_level;
+extern struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void);
+void ump_kernel_destructor(void);
+int map_errcode( _mali_osk_errcode_t err );
+
+/**
+ * Variables from user space cannot be dereferenced from kernel space; tagging
+ * them with __user allows the GCC compiler to generate a warning. Other
+ * compilers may not support this, so we define it here as an empty macro if
+ * the compiler doesn't define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __UMP_KERNEL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c
new file mode 100644
index 00000000000..2531f802127
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static ump_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(ump_descriptor_table * table);
+
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries)
+{
+ ump_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping) );
+
+ init_entries = MALI_PAD_INT(init_entries);
+ max_entries = MALI_PAD_INT(max_entries);
+
+ if (NULL != map)
+ {
+ map->table = descriptor_table_alloc(init_entries);
+ if (NULL != map->table)
+ {
+ map->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_READERWRITER, 0 , 0);
+ if ( NULL != map->lock )
+ {
+ _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic from kicking in */
+ map->max_nr_mappings_allowed = max_entries;
+ map->current_nr_mappings = init_entries;
+ return map;
+ }
+ descriptor_table_free(map->table);
+ }
+ _mali_osk_free(map);
+ }
+ return NULL;
+}
+
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map)
+{
+ descriptor_table_free(map->table);
+ _mali_osk_lock_term( map->lock );
+ _mali_osk_free(map);
+}
+
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target)
+{
+ int descriptor = -1;/*-EFAULT;*/
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+ if (descriptor == map->current_nr_mappings)
+ {
+ int nr_mappings_new;
+ /* no free descriptor, try to expand the table */
+ ump_descriptor_table * new_table;
+ ump_descriptor_table * old_table = map->table;
+ nr_mappings_new = map->current_nr_mappings * 2;
+
+ if (map->current_nr_mappings >= map->max_nr_mappings_allowed)
+ {
+ descriptor = -1;
+ goto unlock_and_exit;
+ }
+
+ new_table = descriptor_table_alloc(nr_mappings_new);
+ if (NULL == new_table)
+ {
+ descriptor = -1;
+ goto unlock_and_exit;
+ }
+
+ _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+ _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+ map->table = new_table;
+ map->current_nr_mappings = nr_mappings_new;
+ descriptor_table_free(old_table);
+ }
+
+ /* we have found a valid descriptor, set the value and usage bit */
+ _mali_osk_set_nonatomic_bit(descriptor, map->table->usage);
+ map->table->mappings[descriptor] = target;
+
+unlock_and_exit:
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+ return descriptor;
+}
+
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target)
+{
+ int result = -1;/*-EFAULT;*/
+ DEBUG_ASSERT(map);
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ *target = map->table->mappings[descriptor];
+ result = 0;
+ }
+ else *target = NULL;
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ return result;
+}
+
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target)
+{
+ int result = -1;/*-EFAULT;*/
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = target;
+ result = 0;
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ return result;
+}
+
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor)
+{
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = NULL;
+ _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static ump_descriptor_table * descriptor_table_alloc(int count)
+{
+ ump_descriptor_table * table;
+
+ table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count) );
+
+ if (NULL != table)
+ {
+ table->usage = (u32*)((u8*)table + sizeof(ump_descriptor_table));
+ table->mappings = (void**)((u8*)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+ }
+
+ return table;
+}
+
+static void descriptor_table_free(ump_descriptor_table * table)
+{
+ _mali_osk_free(table);
+}
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h
new file mode 100644
index 00000000000..92bbe54bd35
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_descriptor_mapping.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_descriptor_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct ump_descriptor_table
+{
+ u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void** mappings; /**< Array of the pointers the descriptors map to */
+} ump_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct ump_descriptor_mapping
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ ump_descriptor_table * table; /**< Pointer to the current mapping table */
+} ump_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return The descriptor allocated, a negative value on error
+ */
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor);
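+
+/*
+ * Usage sketch (illustrative only; the sizes and the mapped object are made
+ * up): a typical create/allocate/lookup/free cycle looks like:
+ *
+ *   ump_descriptor_mapping * map = ump_descriptor_mapping_create(64, 4096);
+ *   int id = ump_descriptor_mapping_allocate_mapping(map, my_object);
+ *   void * obj;
+ *   if ((id >= 0) && (0 == ump_descriptor_mapping_get(map, id, &obj)))
+ *   {
+ *       ... use obj, which equals my_object ...
+ *   }
+ *   ump_descriptor_mapping_free(map, id);
+ *   ump_descriptor_mapping_destroy(map);
+ */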
+
+#endif /* __UMP_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h
new file mode 100644
index 00000000000..02a64707036
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_memory_backend.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_H__
+
+#include "ump_kernel_interface.h"
+#include "ump_kernel_types.h"
+
+
+typedef struct ump_memory_allocation
+{
+ void * phys_addr;
+ void * mapping;
+ unsigned long size;
+ ump_dd_handle handle;
+ void * process_mapping_info;
+ u32 cookie; /**< necessary on some U/K interface implementations */
+ struct ump_session_data * ump_session; /**< Session that this allocation belongs to */
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+ u32 is_cached;
+} ump_memory_allocation;
+
+typedef struct ump_memory_backend
+{
+ int (*allocate)(void* ctx, ump_dd_mem * descriptor);
+ void (*release)(void* ctx, ump_dd_mem * descriptor);
+ void (*shutdown)(struct ump_memory_backend * backend);
+ int (*pre_allocate_physical_check)(void *ctx, u32 size);
+ u32 (*adjust_to_mali_phys)(void *ctx, u32 cpu_phys);
+ void * ctx;
+} ump_memory_backend;
+
+ump_memory_backend * ump_memory_backend_create ( void );
+void ump_memory_backend_destroy( void );
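+
+/*
+ * Sketch of the backend contract (illustrative; the my_* names are made up):
+ * allocate() fills in the block_array, nr_blocks and size_bytes fields of
+ * the descriptor and returns non-zero on success; release() undoes it and
+ * shutdown() tears the backend down. A do-nothing stub that always fails
+ * its allocations would look like:
+ *
+ *   static int my_allocate(void * ctx, ump_dd_mem * descriptor) { return 0; }
+ *   static void my_release(void * ctx, ump_dd_mem * descriptor) { }
+ *   static void my_shutdown(struct ump_memory_backend * backend) { }
+ *
+ *   static ump_memory_backend my_backend =
+ *   {
+ *       .allocate = my_allocate,
+ *       .release = my_release,
+ *       .shutdown = my_shutdown,
+ *       .pre_allocate_physical_check = NULL,
+ *       .adjust_to_mali_phys = NULL,
+ *       .ctx = NULL,
+ *   };
+ */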
+
+#endif /*__UMP_KERNEL_MEMORY_BACKEND_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c
new file mode 100644
index 00000000000..5a997e222a7
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_ref_drv.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define UMP_MINIMUM_SIZE 4096
+#define UMP_MINIMUM_SIZE_MASK (~(UMP_MINIMUM_SIZE-1))
+#define UMP_SIZE_ALIGN(x) (((x)+UMP_MINIMUM_SIZE-1)&UMP_MINIMUM_SIZE_MASK)
+#define UMP_ADDR_ALIGN_OFFSET(x) ((x)&(UMP_MINIMUM_SIZE-1))
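+
+/*
+ * Worked example (illustrative): with UMP_MINIMUM_SIZE at 4096,
+ * UMP_SIZE_ALIGN rounds a size up to the next 4 kB boundary and
+ * UMP_ADDR_ALIGN_OFFSET extracts the offset within a 4 kB page:
+ *
+ *   UMP_SIZE_ALIGN(1)    evaluates to 4096
+ *   UMP_SIZE_ALIGN(4096) evaluates to 4096
+ *   UMP_SIZE_ALIGN(4097) evaluates to 8192
+ *   UMP_ADDR_ALIGN_OFFSET(0x12345678) evaluates to 0x678
+ */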
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor);
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+ ump_dd_mem * mem;
+ unsigned long size_total = 0;
+ int map_id;
+ u32 i;
+
+ /* Go through the input blocks and verify that they are sane */
+ for (i=0; i < num_blocks; i++)
+ {
+ unsigned long addr = blocks[i].addr;
+ unsigned long size = blocks[i].size;
+
+ DBG_MSG(5, ("Adding physical memory to new handle. Address: 0x%08lx, size: %lu\n", addr, size));
+ size_total += blocks[i].size;
+
+ if (0 != UMP_ADDR_ALIGN_OFFSET(addr))
+ {
+ MSG_ERR(("Trying to create UMP memory from unaligned physical address. Address: 0x%08lx\n", addr));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ if (0 != UMP_ADDR_ALIGN_OFFSET(size))
+ {
+ MSG_ERR(("Trying to create UMP memory with unaligned size. Size: %lu\n", size));
+ return UMP_DD_HANDLE_INVALID;
+ }
+ }
+
+ /* Allocate the ump_dd_mem struct for this allocation */
+ mem = _mali_osk_malloc(sizeof(*mem));
+ if (NULL == mem)
+ {
+ DBG_MSG(1, ("Could not allocate ump_dd_mem in ump_dd_handle_create_from_phys_blocks()\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ /* Find a secure ID for this allocation */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);
+
+ if (map_id < 0)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(mem);
+ DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ /* Now, make a copy of the block information supplied by the user */
+ mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
+ if (NULL == mem->block_array)
+ {
+ ump_descriptor_mapping_free(device.secure_id_map, map_id);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(mem);
+ DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ _mali_osk_memcpy(mem->block_array, blocks, sizeof(ump_dd_physical_block) * num_blocks);
+
+ /* And setup the rest of the ump_dd_mem struct */
+ _mali_osk_atomic_init(&mem->ref_count, 1);
+ mem->secure_id = (ump_secure_id)map_id;
+ mem->size_bytes = size_total;
+ mem->nr_blocks = num_blocks;
+ mem->backend_info = NULL;
+ mem->ctx = NULL;
+ mem->release_func = phys_blocks_release;
+ /* For now, UMP handles created by ump_dd_handle_create_from_phys_blocks() are forced to be uncached */
+ mem->is_cached = 0;
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+ return (ump_dd_handle)mem;
+}
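+
+/*
+ * Usage sketch (illustrative only; the address is made up and must be
+ * UMP_MINIMUM_SIZE aligned): wrapping an existing physically contiguous
+ * buffer in a UMP handle so it can be shared by secure ID:
+ *
+ *   ump_dd_physical_block block = { .addr = 0x80000000, .size = 8192 };
+ *   ump_dd_handle handle = ump_dd_handle_create_from_phys_blocks(&block, 1);
+ *   if (UMP_DD_HANDLE_INVALID != handle)
+ *   {
+ *       ump_secure_id id = ump_dd_secure_id_get(handle);
+ *       ... hand id over to another driver or to user space ...
+ *       ump_dd_reference_release(handle);
+ *   }
+ */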
+
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor)
+{
+ _mali_osk_free(descriptor->block_array);
+ descriptor->block_array = NULL;
+}
+
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
+{
+ ump_session_data * session_data = NULL;
+ ump_dd_mem *new_allocation = NULL;
+ ump_session_memory_list_element * session_memory_element = NULL;
+ int map_id;
+
+ DEBUG_ASSERT_POINTER( user_interaction );
+ DEBUG_ASSERT_POINTER( user_interaction->ctx );
+
+ session_data = (ump_session_data *) user_interaction->ctx;
+
+ session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
+ if (NULL == session_memory_element)
+ {
+ DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+
+ new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
+ if (NULL==new_allocation)
+ {
+ _mali_osk_free(session_memory_element);
+ DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Create a secure ID for this allocation */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);
+
+ if (map_id < 0)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(session_memory_element);
+ _mali_osk_free(new_allocation);
+ DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
+ return _MALI_OSK_ERR_INVALID_FUNC; /* error codes are already negative; do not negate */
+ }
+
+ /* Initialize the part of the new_allocation that we know so far */
+ new_allocation->secure_id = (ump_secure_id)map_id;
+ _mali_osk_atomic_init(&new_allocation->ref_count,1);
+ if ( 0==(UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints) )
+ new_allocation->is_cached = 0;
+ else new_allocation->is_cached = 1;
+
+ /* Special-case a size of 0: emulate what malloc does here, which is to return a valid pointer that must be freed but cannot be dereferenced */
+ if (0 == user_interaction->size)
+ {
+ user_interaction->size = 1; /* emulate by actually allocating the minimum block size */
+ }
+
+ new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
+
+ /* Now, ask the active memory backend to do the actual memory allocation */
+ if (!device.backend->allocate( device.backend->ctx, new_allocation ) )
+ {
+ DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
+ ump_descriptor_mapping_free(device.secure_id_map, map_id);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(new_allocation);
+ _mali_osk_free(session_memory_element);
+ return _MALI_OSK_ERR_INVALID_FUNC;
+ }
+
+ new_allocation->ctx = device.backend->ctx;
+ new_allocation->release_func = device.backend->release;
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* Initialize the session_memory_element, and add it to the session object */
+ session_memory_element->mem = new_allocation;
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ user_interaction->secure_id = new_allocation->secure_id;
+ user_interaction->size = new_allocation->size_bytes;
+ DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));
+
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h
new file mode 100644
index 00000000000..dc79b6f289b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_kernel_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_TYPES_H__
+#define __UMP_KERNEL_TYPES_H__
+
+#include "ump_kernel_interface.h"
+#include "mali_osk.h"
+
+/*
+ * This struct is what is "behind" a ump_dd_handle
+ */
+typedef struct ump_dd_mem
+{
+ ump_secure_id secure_id;
+ _mali_osk_atomic_t ref_count;
+ unsigned long size_bytes;
+ unsigned long nr_blocks;
+ ump_dd_physical_block * block_array;
+ void (*release_func)(void * ctx, struct ump_dd_mem * descriptor);
+ void * ctx;
+ void * backend_info;
+ int is_cached;
+} ump_dd_mem;
+
+
+
+#endif /* __UMP_KERNEL_TYPES_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h
new file mode 100644
index 00000000000..73284f02b47
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_osk.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk.h
+ * Defines the OS abstraction layer for the UMP kernel device driver (OSK)
+ */
+
+#ifndef __UMP_OSK_H__
+#define __UMP_OSK_H__
+
+#include <mali_osk.h>
+#include <ump_kernel_memory_backend.h>
+#include <ump_uk_types.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+_mali_osk_errcode_t _ump_osk_init( void );
+
+_mali_osk_errcode_t _ump_osk_term( void );
+
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom );
+
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation *descriptor );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size );
+
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor );
+
+void _ump_osk_msync( ump_dd_mem * mem, ump_uk_msync_op op );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h
new file mode 100644
index 00000000000..b08335f61f1
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_uk_types.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __UMP_UK_TYPES_H__
+#define __UMP_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Helpers for API version handling */
+#define MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+#define IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+#define GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+#define IS_API_MATCH(x, y) (IS_VERSION_ID((x)) && IS_VERSION_ID((y)) && (GET_VERSION((x)) == GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * So for version 1 the value would be 0x00010001
+ */
+#define UMP_IOCTL_API_VERSION MAKE_VERSION_ID(2)
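+
+/*
+ * Worked example (illustrative): MAKE_VERSION_ID(2) evaluates to 0x00020002.
+ * IS_VERSION_ID() checks that both 16-bit halves agree, so a corrupted value
+ * such as 0x00020003 is rejected, and IS_API_MATCH() then compares only the
+ * 16-bit version numbers of two well-formed IDs:
+ *
+ *   IS_VERSION_ID(0x00020002)            is true
+ *   IS_VERSION_ID(0x00020003)            is false
+ *   IS_API_MATCH(0x00020002, 0x00020002) is true
+ */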
+
+typedef enum
+{
+ _UMP_IOC_QUERY_API_VERSION = 1,
+ _UMP_IOC_ALLOCATE,
+ _UMP_IOC_RELEASE,
+ _UMP_IOC_SIZE_GET,
+ _UMP_IOC_MAP_MEM, /* not used in Linux */
+ _UMP_IOC_UNMAP_MEM, /* not used in Linux */
+ _UMP_IOC_MSYNC,
+}_ump_uk_functions;
+
+typedef enum
+{
+ UMP_REF_DRV_UK_CONSTRAINT_NONE = 0,
+ UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR = 1,
+ UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE = 4,
+} ump_uk_alloc_constraints;
+
+typedef enum
+{
+ _UMP_UK_MSYNC_CLEAN = 0,
+ _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
+ _UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
+} ump_uk_msync_op;
+
+/**
+ * Get API version ([in,out] u32 api_version, [out] u32 compatible)
+ */
+typedef struct _ump_uk_api_version_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 version; /**< Set to the user space version on entry, stores the device driver version on exit */
+ u32 compatible; /**< Non-null if the device is compatible with the client */
+} _ump_uk_api_version_s;
+
+/**
+ * ALLOCATE ([out] u32 secure_id, [in,out] u32 size, [in] constraints)
+ */
+typedef struct _ump_uk_allocate_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Return value from DD to Userdriver */
+ u32 size; /**< [in,out] Requested size on input; actual allocated size on output */
+ ump_uk_alloc_constraints constraints; /**< Only input to Devicedriver */
+} _ump_uk_allocate_s;
+
+/**
+ * SIZE_GET ([in] u32 secure_id, [out]size )
+ */
+typedef struct _ump_uk_size_get_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+ u32 size; /**< Returned size; output */
+} _ump_uk_size_get_s;
+
+/**
+ * Release ([in] u32 secure_id)
+ */
+typedef struct _ump_uk_release_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+} _ump_uk_release_s;
+
+typedef struct _ump_uk_map_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ void *phys_addr; /**< [in] physical address */
+ unsigned long size; /**< [in] size */
+ u32 secure_id; /**< [in] secure_id to assign to mapping */
+ void * _ukk_private; /**< Only used inside linux port between kernel frontend and common part to store vma */
+ u32 cookie;
+ u32 is_cached; /**< [in,out] caching of CPU mappings */
+} _ump_uk_map_mem_s;
+
+typedef struct _ump_uk_unmap_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping;
+ u32 size;
+ void * _ukk_private;
+ u32 cookie;
+} _ump_uk_unmap_mem_s;
+
+typedef struct _ump_uk_msync_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] mapping addr */
+ void *address; /**< [in] flush start addr */
+ u32 size; /**< [in] size to flush */
+ ump_uk_msync_op op; /**< [in] flush operation */
+ u32 cookie; /**< [in] cookie stored with reference to the kernel mapping internals */
+ u32 secure_id; /**< [in] secure ID of the memory to flush */
+ u32 is_cached; /**< [out] caching of CPU mappings */
+} _ump_uk_msync_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UK_TYPES_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h
new file mode 100644
index 00000000000..a3317fc7b21
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/common/ump_ukk.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __UMP_UKK_H__
+#define __UMP_UKK_H__
+
+#include "mali_osk.h"
+#include "ump_uk_types.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+_mali_osk_errcode_t _ump_ukk_open( void** context );
+
+_mali_osk_errcode_t _ump_ukk_close( void** context );
+
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction );
+
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info );
+
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction );
+
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args );
+
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args );
+
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args );
+
+void _ump_ukk_msync( _ump_uk_msync_s *args );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h
new file mode 100644
index 00000000000..17b930d2c57
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/license/gpl/ump_kernel_license.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __UMP_KERNEL_LICENSE_H__
+#define __UMP_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define UMP_KERNEL_LINUX_LICENSE "GPL"
+#define UMP_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_LICENSE_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h
new file mode 100644
index 00000000000..b11429816ab
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ioctl.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_IOCTL_H__
+#define __UMP_IOCTL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include "../common/ump_uk_types.h"
+
+#ifndef __user
+#define __user
+#endif
+
+
+/**
+ * @file ump_ioctl.h
+ * This file describes the interface needed to use the Linux device driver.
+ * The interface is used by the user-space UMP driver.
+ */
+
+#define UMP_IOCTL_NR 0x90
+
+
+#define UMP_IOC_QUERY_API_VERSION _IOR(UMP_IOCTL_NR, _UMP_IOC_QUERY_API_VERSION, _ump_uk_api_version_s)
+#define UMP_IOC_ALLOCATE _IOWR(UMP_IOCTL_NR, _UMP_IOC_ALLOCATE, _ump_uk_allocate_s)
+#define UMP_IOC_RELEASE _IOR(UMP_IOCTL_NR, _UMP_IOC_RELEASE, _ump_uk_release_s)
+#define UMP_IOC_SIZE_GET _IOWR(UMP_IOCTL_NR, _UMP_IOC_SIZE_GET, _ump_uk_size_get_s)
+#define UMP_IOC_MSYNC _IOW(UMP_IOCTL_NR, _UMP_IOC_MSYNC, _ump_uk_msync_s)
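+
+/*
+ * User-space usage sketch (illustrative only; error handling omitted, and
+ * the device node name assumes the driver registered as "ump"):
+ *
+ *   int fd = open("/dev/ump", O_RDWR);
+ *   _ump_uk_allocate_s args = { 0 };
+ *   args.size = 4096;
+ *   args.constraints = UMP_REF_DRV_UK_CONSTRAINT_NONE;
+ *   ioctl(fd, UMP_IOC_ALLOCATE, &args);
+ *   ... args.secure_id now identifies the allocation, args.size the
+ *       page-aligned size actually reserved ...
+ */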
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_IOCTL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c
new file mode 100644
index 00000000000..76d6b2f913a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/ioport.h> /* request_mem_region */
+#include <linux/mm.h> /* memory management functions and types */
+#include <asm/uaccess.h> /* user space access */
+#include <asm/atomic.h>
+#include <linux/device.h>
+#include "arch/config.h" /* Configuration for current platform. The symlinc for arch is set by Makefile */
+#include "ump_ioctl.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+#include "ump_kernel_license.h"
+
+#include "ump_osk.h"
+#include "ump_ukk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk_wrappers.h"
+#include "ump_ukk_ref_wrappers.h"
+
+
+/* Module parameter to control log level */
+int ump_debug_level = 2;
+module_param(ump_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(ump_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int ump_major = 0;
+module_param(ump_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_major, "Device major number");
+
+/* Name of the UMP device driver */
+static char ump_dev_name[] = "ump"; /* should be const, but the functions we call require non-const */
+
+
+
+/*
+ * The data which we attached to each virtual memory mapping request we get.
+ * Each memory mapping has a reference to the UMP memory it maps.
+ * We release this reference when the last memory mapping is unmapped.
+ */
+typedef struct ump_vma_usage_tracker
+{
+ int references;
+ ump_dd_handle handle;
+} ump_vma_usage_tracker;
+
+struct ump_device
+{
+ struct cdev cdev;
+#if UMP_LICENSE_IS_GPL
+ struct class * ump_class;
+#endif
+};
+
+/* The global variable containing the global device data */
+static struct ump_device ump_device;
+
+
+/* Forward declare static functions */
+static int ump_file_open(struct inode *inode, struct file *filp);
+static int ump_file_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma);
+
+
+/* This variable defines the file operations this UMP device driver offer */
+static struct file_operations ump_fops =
+{
+ .owner = THIS_MODULE,
+ .open = ump_file_open,
+ .release = ump_file_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = ump_file_ioctl,
+#else
+ .ioctl = ump_file_ioctl,
+#endif
+ .mmap = ump_file_mmap
+};
+
+
+/* This function is called by Linux to initialize this module.
+ * All we do is initialize the UMP device driver.
+ */
+static int ump_initialize_module(void)
+{
+ _mali_osk_errcode_t err;
+
+ DBG_MSG(2, ("Inserting UMP device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__));
+
+ err = ump_kernel_constructor();
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("UMP device driver init failed\n"));
+ return map_errcode(err);
+ }
+
+ MSG(("UMP device driver %s loaded\n", SVN_REV_STRING));
+ return 0;
+}
+
+
+
+/*
+ * This function is called by Linux to unload/terminate/exit/cleanup this module.
+ * All we do is terminate the UMP device driver.
+ */
+static void ump_cleanup_module(void)
+{
+ DBG_MSG(2, ("Unloading UMP device driver\n"));
+ ump_kernel_destructor();
+ DBG_MSG(2, ("Module unloaded\n"));
+}
+
+
+
+/*
+ * Initialize the UMP device driver.
+ */
+int ump_kernel_device_initialize(void)
+{
+ int err;
+ dev_t dev = 0;
+
+ if (0 == ump_major)
+ {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0, 1, ump_dev_name);
+ ump_major = MAJOR(dev);
+ }
+ else
+ {
+ /* use load time defined major number */
+ dev = MKDEV(ump_major, 0);
+ err = register_chrdev_region(dev, 1, ump_dev_name);
+ }
+
+ if (0 == err)
+ {
+ memset(&ump_device, 0, sizeof(ump_device));
+
+ /* initialize our char dev data */
+ cdev_init(&ump_device.cdev, &ump_fops);
+ ump_device.cdev.owner = THIS_MODULE;
+ ump_device.cdev.ops = &ump_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&ump_device.cdev, dev, 1/*count*/);
+ if (0 == err)
+ {
+
+#if UMP_LICENSE_IS_GPL
+ ump_device.ump_class = class_create(THIS_MODULE, ump_dev_name);
+ if (IS_ERR(ump_device.ump_class))
+ {
+ err = PTR_ERR(ump_device.ump_class);
+ }
+ else
+ {
+ struct device * mdev;
+ mdev = device_create(ump_device.ump_class, NULL, dev, NULL, ump_dev_name);
+ if (!IS_ERR(mdev))
+ {
+ return 0;
+ }
+
+ err = PTR_ERR(mdev);
+ }
+ cdev_del(&ump_device.cdev);
+#else
+ return 0;
+#endif
+ }
+
+ unregister_chrdev_region(dev, 1);
+ }
+
+ return err;
+}
+
+
+
+/*
+ * Terminate the UMP device driver
+ */
+void ump_kernel_device_terminate(void)
+{
+ dev_t dev = MKDEV(ump_major, 0);
+
+#if UMP_LICENSE_IS_GPL
+ device_destroy(ump_device.ump_class, dev);
+ class_destroy(ump_device.ump_class);
+#endif
+
+ /* unregister char device */
+ cdev_del(&ump_device.cdev);
+
+ /* free major */
+ unregister_chrdev_region(dev, 1);
+}
+
+/*
+ * Open a new session. User space has called open() on us.
+ */
+static int ump_file_open(struct inode *inode, struct file *filp)
+{
+ struct ump_session_data * session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev))
+ {
+ MSG_ERR(("Minor not zero in ump_file_open()\n"));
+ return -ENODEV;
+ }
+
+ /* Call the OS-Independent UMP Open function */
+ err = _ump_ukk_open((void**) &session_data );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("Ump failed to open a new session\n"));
+ return map_errcode( err );
+ }
+
+ filp->private_data = (void*)session_data;
+ filp->f_pos = 0;
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Close a session. User space has called close() or crashed/terminated.
+ */
+static int ump_file_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ err = _ump_ukk_close((void**) &filp->private_data );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ return map_errcode( err );
+ }
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Handle IOCTL requests.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err = -ENOTTY;
+ void __user * argument;
+ struct ump_session_data * session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ (void)inode; /* inode not used */
+#endif
+
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("No session data attached to file object\n"));
+ return -ENOTTY;
+ }
+
+ /* interpret the argument as a user pointer to something */
+ argument = (void __user *)arg;
+
+ switch (cmd)
+ {
+ case UMP_IOC_QUERY_API_VERSION:
+ err = ump_get_api_version_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_ALLOCATE :
+ err = ump_allocate_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_RELEASE:
+ err = ump_release_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_SIZE_GET:
+ err = ump_size_get_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_MSYNC:
+ err = ump_msync_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ default:
+ DBG_MSG(1, ("No handler for IOCTL. cmd: 0x%08x, arg: 0x%08lx\n", cmd, arg));
+ err = -EFAULT;
+ break;
+ }
+
+ return err;
+}
+
+int map_errcode( _mali_osk_errcode_t err )
+{
+ switch(err)
+ {
+ case _MALI_OSK_ERR_OK : return 0;
+ case _MALI_OSK_ERR_FAULT: return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+ default: return -EFAULT;
+ }
+}
+
+/*
+ * Handler called by the OS to map specified virtual memory to specified UMP memory.
+ */
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ _ump_uk_map_mem_s args;
+ _mali_osk_errcode_t err;
+ struct ump_session_data * session_data;
+
+ /* Validate the session data */
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("mmap() called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ /* Re-pack the arguments that mmap() packed for us */
+ args.ctx = session_data;
+ args.phys_addr = 0;
+ args.size = vma->vm_end - vma->vm_start;
+ args._ukk_private = vma;
+ args.secure_id = vma->vm_pgoff;
+ args.is_cached = 0;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ {
+ args.is_cached = 1;
+ vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
+ DBG_MSG(3, ("UMP Map function: Forcing the CPU to use cache\n"));
+ }
+
+ DBG_MSG(4, ("UMP vma->flags: %x\n", vma->vm_flags ));
+
+ /* Call the common mmap handler */
+ err = _ump_ukk_map_mem( &args );
+ if ( _MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("_ump_ukk_map_mem() failed in function ump_file_mmap()"));
+ return map_errcode( err );
+ }
+
+ return 0; /* success */
+}
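+
+/*
+ * User-space sketch (illustrative; assumes a /dev/ump node and a 4096 byte
+ * page size): the secure ID travels in through the mmap offset, so mapping
+ * a UMP allocation looks like:
+ *
+ *   void * p = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *                   MAP_SHARED, fd, secure_id * 4096);
+ *
+ * mmap() divides the byte offset by the page size to produce vm_pgoff,
+ * which the handler above reads back as the secure ID.
+ */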
+
+/* Export UMP kernel space API functions */
+EXPORT_SYMBOL(ump_dd_secure_id_get);
+EXPORT_SYMBOL(ump_dd_handle_create_from_secure_id);
+EXPORT_SYMBOL(ump_dd_phys_block_count_get);
+EXPORT_SYMBOL(ump_dd_phys_block_get);
+EXPORT_SYMBOL(ump_dd_phys_blocks_get);
+EXPORT_SYMBOL(ump_dd_size_get);
+EXPORT_SYMBOL(ump_dd_reference_add);
+EXPORT_SYMBOL(ump_dd_reference_release);
+
+/* Export our own extended kernel space allocator */
+EXPORT_SYMBOL(ump_dd_handle_create_from_phys_blocks);
+
+/* Setup init and exit functions for this module */
+module_init(ump_initialize_module);
+module_exit(ump_cleanup_module);
+
+/* And some module information */
+MODULE_LICENSE(UMP_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h
new file mode 100644
index 00000000000..464c035974b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_linux.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_H__
+#define __UMP_KERNEL_H__
+
+int ump_kernel_device_initialize(void);
+void ump_kernel_device_terminate(void);
+
+
+#endif /* __UMP_KERNEL_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c
new file mode 100644
index 00000000000..5c8a7f3783f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+#define UMP_BLOCK_SIZE (256UL * 1024UL) /* 256kB, remember to keep the ()s */
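+
+/*
+ * Worked example (illustrative): the allocator only deals in whole 256 kB
+ * blocks, so a 300 kB request is rounded up to two blocks (512 kB) by
+ * block_allocator_allocate() below.
+ */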
+
+
+
+typedef struct block_info
+{
+ struct block_info * next;
+} block_info;
+
+
+
+typedef struct block_allocator
+{
+ struct semaphore mutex;
+ block_info * all_blocks;
+ block_info * first_free;
+ u32 base;
+ u32 num_blocks;
+ u32 num_free;
+} block_allocator;
+
+
+static void block_allocator_shutdown(ump_memory_backend * backend);
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem);
+static void block_allocator_release(void * ctx, ump_dd_mem * handle);
+static inline u32 get_phys(block_allocator * allocator, block_info * block);
+
+
+
+/*
+ * Create dedicated memory backend
+ */
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size)
+{
+ ump_memory_backend * backend;
+ block_allocator * allocator;
+ u32 usable_size;
+ u32 num_blocks;
+
+ usable_size = (size + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1);
+ num_blocks = usable_size / UMP_BLOCK_SIZE;
+
+ if (0 == usable_size)
+ {
+ DBG_MSG(1, ("Memory block of size %u is unusable\n", size));
+ return NULL;
+ }
+
+ DBG_MSG(5, ("Creating dedicated UMP memory backend. Base address: 0x%08x, size: 0x%08x\n", base_address, size));
+ DBG_MSG(6, ("%u usable bytes which becomes %u blocks\n", usable_size, num_blocks));
+
+ backend = kzalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+ if (NULL != backend)
+ {
+ allocator = kmalloc(sizeof(block_allocator), GFP_KERNEL);
+ if (NULL != allocator)
+ {
+ allocator->all_blocks = kmalloc(sizeof(block_info) * num_blocks, GFP_KERNEL); /* one block_info per block, not one block_allocator */
+ if (NULL != allocator->all_blocks)
+ {
+ int i;
+
+ allocator->first_free = NULL;
+ allocator->num_blocks = num_blocks;
+ allocator->num_free = num_blocks;
+ allocator->base = base_address;
+ sema_init(&allocator->mutex, 1);
+
+ for (i = 0; i < num_blocks; i++)
+ {
+ allocator->all_blocks[i].next = allocator->first_free;
+ allocator->first_free = &allocator->all_blocks[i];
+ }
+
+ backend->ctx = allocator;
+ backend->allocate = block_allocator_allocate;
+ backend->release = block_allocator_release;
+ backend->shutdown = block_allocator_shutdown;
+ backend->pre_allocate_physical_check = NULL;
+ backend->adjust_to_mali_phys = NULL;
+
+ return backend;
+ }
+ kfree(allocator);
+ }
+ kfree(backend);
+ }
+
+ return NULL;
+}
+
+
+
+/*
+ * Destroy specified dedicated memory backend
+ */
+static void block_allocator_shutdown(ump_memory_backend * backend)
+{
+ block_allocator * allocator;
+
+ BUG_ON(!backend);
+ BUG_ON(!backend->ctx);
+
+ allocator = (block_allocator*)backend->ctx;
+
+ DBG_MSG_IF(1, allocator->num_free != allocator->num_blocks, ("%u blocks still in use during shutdown\n", allocator->num_blocks - allocator->num_free));
+
+ kfree(allocator->all_blocks);
+ kfree(allocator);
+ kfree(backend);
+}
+
+
+
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
+{
+ block_allocator * allocator;
+ u32 left;
+ block_info * last_allocated = NULL;
+ int i = 0;
+
+ BUG_ON(!ctx);
+ BUG_ON(!mem);
+
+ allocator = (block_allocator*)ctx;
+ left = mem->size_bytes;
+
+ BUG_ON(!left);
+
+ mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
+ mem->block_array = (ump_dd_physical_block*)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
+ if (NULL == mem->block_array)
+ {
+ MSG_ERR(("Failed to allocate block array\n"));
+ return 0;
+ }
+
+ if (down_interruptible(&allocator->mutex))
+ {
+ MSG_ERR(("Could not get mutex to do block_allocate\n"));
+ return 0;
+ }
+
+ mem->size_bytes = 0;
+
+ while ((left > 0) && (allocator->first_free))
+ {
+ block_info * block;
+
+ block = allocator->first_free;
+ allocator->first_free = allocator->first_free->next;
+ block->next = last_allocated;
+ last_allocated = block;
+ allocator->num_free--;
+
+ mem->block_array[i].addr = get_phys(allocator, block);
+ mem->block_array[i].size = UMP_BLOCK_SIZE;
+ mem->size_bytes += UMP_BLOCK_SIZE;
+
+ i++;
+
+ if (left < UMP_BLOCK_SIZE) left = 0;
+ else left -= UMP_BLOCK_SIZE;
+ }
+
+ if (left)
+ {
+ block_info * block;
+ /* release all memory back to the pool */
+ while (last_allocated)
+ {
+ block = last_allocated->next;
+ last_allocated->next = allocator->first_free;
+ allocator->first_free = last_allocated;
+ last_allocated = block;
+ allocator->num_free++;
+ }
+
+ vfree(mem->block_array);
+ mem->backend_info = NULL;
+ mem->block_array = NULL;
+
+ DBG_MSG(4, ("Could not find a mem-block for the allocation.\n"));
+ up(&allocator->mutex);
+
+ return 0;
+ }
+
+ mem->backend_info = last_allocated;
+
+ up(&allocator->mutex);
+ mem->is_cached=0;
+
+ return 1;
+}
+
+
+
+static void block_allocator_release(void * ctx, ump_dd_mem * handle)
+{
+ block_allocator * allocator;
+ block_info * block, * next;
+
+ BUG_ON(!ctx);
+ BUG_ON(!handle);
+
+ allocator = (block_allocator*)ctx;
+ block = (block_info*)handle->backend_info;
+ BUG_ON(!block);
+
+ if (down_interruptible(&allocator->mutex))
+ {
+ MSG_ERR(("Allocator release: Failed to get mutex - memory leak\n"));
+ return;
+ }
+
+ while (block)
+ {
+ next = block->next;
+
+ BUG_ON( (block < allocator->all_blocks) || (block > (allocator->all_blocks + allocator->num_blocks)));
+
+ block->next = allocator->first_free;
+ allocator->first_free = block;
+ allocator->num_free++;
+
+ block = next;
+ }
+ DBG_MSG(3, ("%d blocks free after release call\n", allocator->num_free));
+ up(&allocator->mutex);
+
+ vfree(handle->block_array);
+ handle->block_array = NULL;
+}
+
+
+
+/*
+ * Helper function for calculating the physical base address of a memory block
+ */
+static inline u32 get_phys(block_allocator * allocator, block_info * block)
+{
+ return allocator->base + ((block - allocator->all_blocks) * UMP_BLOCK_SIZE);
+}
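+
+/*
+ * Worked example (illustrative): with base at 0x80000000, block 0 maps to
+ * 0x80000000 and block 1 to 0x80040000 (base + 256 kB); the block index
+ * comes from plain pointer arithmetic on the all_blocks array:
+ *
+ *   get_phys(allocator, &allocator->all_blocks[1]) == base + UMP_BLOCK_SIZE
+ */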
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h
new file mode 100644
index 00000000000..58ebe15e5a2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_dedicated.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_dedicated.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c
new file mode 100644
index 00000000000..99c9c223036
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+typedef struct os_allocator
+{
+ struct semaphore mutex;
+ u32 num_pages_max; /**< Maximum number of pages to allocate from the OS */
+ u32 num_pages_allocated; /**< Number of pages allocated from the OS */
+} os_allocator;
+
+
+
+static void os_free(void* ctx, ump_dd_mem * descriptor);
+static int os_allocate(void* ctx, ump_dd_mem * descriptor);
+static void os_memory_backend_destroy(ump_memory_backend * backend);
+
+
+
+/*
+ * Create OS memory backend
+ */
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
+{
+ ump_memory_backend * backend;
+ os_allocator * info;
+
+ info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
+ if (NULL == info)
+ {
+ return NULL;
+ }
+
+ info->num_pages_max = max_allocation >> PAGE_SHIFT;
+ info->num_pages_allocated = 0;
+
+ sema_init(&info->mutex, 1);
+
+ backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+ if (NULL == backend)
+ {
+ kfree(info);
+ return NULL;
+ }
+
+ backend->ctx = info;
+ backend->allocate = os_allocate;
+ backend->release = os_free;
+ backend->shutdown = os_memory_backend_destroy;
+ backend->pre_allocate_physical_check = NULL;
+ backend->adjust_to_mali_phys = NULL;
+
+ return backend;
+}
+
+
+
+/*
+ * Destroy specified OS memory backend
+ */
+static void os_memory_backend_destroy(ump_memory_backend * backend)
+{
+ os_allocator * info = (os_allocator*)backend->ctx;
+
+ DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));
+
+ kfree(info);
+ kfree(backend);
+}
+
+
+
+/*
+ * Allocate UMP memory
+ */
+static int os_allocate(void* ctx, ump_dd_mem * descriptor)
+{
+ u32 left;
+ os_allocator * info;
+ int pages_allocated = 0;
+ int is_cached;
+
+ BUG_ON(!descriptor);
+ BUG_ON(!ctx);
+
+ info = (os_allocator*)ctx;
+ left = descriptor->size_bytes;
+ is_cached = descriptor->is_cached;
+
+ if (down_interruptible(&info->mutex))
+ {
+ DBG_MSG(1, ("Failed to get mutex in os_free\n"));
+ return 0; /* failure */
+ }
+
+ descriptor->backend_info = NULL;
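+	/* Round the requested size up to whole pages; each page becomes one entry in the block array. */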
+ descriptor->nr_blocks = ((left + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+ DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor->nr_blocks * sizeof(ump_dd_physical_block)));
+
+ descriptor->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * descriptor->nr_blocks);
+ if (NULL == descriptor->block_array)
+ {
+ up(&info->mutex);
+ DBG_MSG(1, ("Block array could not be allocated\n"));
+ return 0; /* failure */
+ }
+
+ while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max))
+ {
+ struct page * new_page;
+
+ if (is_cached)
+ {
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN);
+ } else
+ {
+ new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+ }
+ if (NULL == new_page)
+ {
+ break;
+ }
+
+ /* Ensure page caches are flushed. */
+ if ( is_cached )
+ {
+ descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
+ descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+ } else
+ {
+ descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
+ descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+ }
+
+ DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor->block_array[pages_allocated].addr, is_cached));
+
+ if (left < PAGE_SIZE)
+ {
+ left = 0;
+ }
+ else
+ {
+ left -= PAGE_SIZE;
+ }
+
+ pages_allocated++;
+ }
+
+ DBG_MSG(5, ("Alloce for ID:%2d got %d pages, cached: %d\n", descriptor->secure_id, pages_allocated));
+
+ if (left)
+ {
+ DBG_MSG(1, ("Failed to allocate needed pages\n"));
+
+ while(pages_allocated)
+ {
+ pages_allocated--;
+ if ( !is_cached )
+ {
+ dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT) );
+ }
+
+ up(&info->mutex);
+
+ return 0; /* failure */
+ }
+
+ info->num_pages_allocated += pages_allocated;
+
+ DBG_MSG(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+ up(&info->mutex);
+
+ return 1; /* success*/
+}
+
+
+/*
+ * Free specified UMP memory
+ */
+static void os_free(void* ctx, ump_dd_mem * descriptor)
+{
+ os_allocator * info;
+ int i;
+
+ BUG_ON(!ctx);
+ BUG_ON(!descriptor);
+
+ info = (os_allocator*)ctx;
+
+ BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);
+
+ if (down_interruptible(&info->mutex))
+ {
+ DBG_MSG(1, ("Failed to get mutex in os_free\n"));
+ return;
+ }
+
+ DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor->nr_blocks));
+
+ info->num_pages_allocated -= descriptor->nr_blocks;
+
+ up(&info->mutex);
+
+ for ( i = 0; i < descriptor->nr_blocks; i++)
+ {
+ DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
+ if ( ! descriptor->is_cached)
+ {
+ dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ __free_page(pfn_to_page(descriptor->block_array[i].addr>>PAGE_SHIFT) );
+ }
+
+ vfree(descriptor->block_array);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h
new file mode 100644
index 00000000000..d6083477d72
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_kernel_memory_backend_os.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_os.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_OS_H__ */
+
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c
new file mode 100644
index 00000000000..1c1190a537f
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_memory_backend.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/ioport.h> /* request_mem_region */
+
+#include "arch/config.h" /* Configuration for current platform. The symlink for arch is set by Makefile */
+
+#include "ump_osk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+
+/* Configure which dynamic memory allocator to use */
+int ump_backend = ARCH_UMP_BACKEND_DEFAULT;
+module_param(ump_backend, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_backend, "0 = dedicated memory backend (default), 1 = OS memory backend");
+
+/* The base address of the memory block for the dedicated memory backend */
+unsigned int ump_memory_address = ARCH_UMP_MEMORY_ADDRESS_DEFAULT;
+module_param(ump_memory_address, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_address, "The physical address to map for the dedicated memory backend");
+
+/* The size of the memory block for the dedicated memory backend */
+unsigned int ump_memory_size = ARCH_UMP_MEMORY_SIZE_DEFAULT;
+module_param(ump_memory_size, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_size, "The size of fixed memory to map in the dedicated memory backend");
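+
+/*
+ * Example module load (values are illustrative only):
+ *   insmod ump.ko ump_backend=1 ump_memory_size=0x02000000
+ * selects the OS memory backend with a 32 MB allocation limit.
+ */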
+
+ump_memory_backend* ump_memory_backend_create ( void )
+{
+ ump_memory_backend * backend = NULL;
+
+ /* Create the dynamic memory allocator backend */
+ if (0 == ump_backend)
+ {
+ DBG_MSG(2, ("Using dedicated memory backend\n"));
+
+ DBG_MSG(2, ("Requesting dedicated memory: 0x%08x, size: %u\n", ump_memory_address, ump_memory_size));
+ /* Ask the OS if we can use the specified physical memory */
+ if (NULL == request_mem_region(ump_memory_address, ump_memory_size, "UMP Memory"))
+ {
+ MSG_ERR(("Failed to request memory region (0x%08X - 0x%08X). Is Mali DD already loaded?\n", ump_memory_address, ump_memory_address + ump_memory_size - 1));
+ return NULL;
+ }
+ backend = ump_block_allocator_create(ump_memory_address, ump_memory_size);
+ }
+ else if (1 == ump_backend)
+ {
+ DBG_MSG(2, ("Using OS memory backend, allocation limit: %d\n", ump_memory_size));
+ backend = ump_os_memory_backend_create(ump_memory_size);
+ }
+
+ return backend;
+}
+
+void ump_memory_backend_destroy( void )
+{
+ if (0 == ump_backend)
+ {
+ DBG_MSG(2, ("Releasing dedicated memory: 0x%08x\n", ump_memory_address));
+ release_mem_region(ump_memory_address, ump_memory_size);
+ }
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c
new file mode 100644
index 00000000000..b3300ab691a
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_atomics.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_atomics.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+#include "ump_osk.h"
+#include <asm/atomic.h>
+
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c
new file mode 100644
index 00000000000..7a1c5e97886
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_low_level_mem.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/memory.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+typedef struct ump_vma_usage_tracker
+{
+ atomic_t references;
+ ump_memory_allocation *descriptor;
+} ump_vma_usage_tracker;
+
+static void ump_vma_open(struct vm_area_struct * vma);
+static void ump_vma_close(struct vm_area_struct * vma);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+static struct vm_operations_struct ump_vm_ops =
+{
+ .open = ump_vma_open,
+ .close = ump_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ .fault = ump_cpu_page_fault_handler
+#else
+ .nopfn = ump_cpu_page_fault_handler
+#endif
+};
+
+/*
+ * Page fault for VMA region
+ * This should never happen since we always map in the entire virtual memory range.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ void __user * address;
+ address = vmf->virtual_address;
+#endif
+ MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
+ MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ return VM_FAULT_SIGBUS;
+#else
+ return NOPFN_SIGBUS;
+#endif
+}
+
+static void ump_vma_open(struct vm_area_struct * vma)
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ int new_val;
+
+ vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ BUG_ON(NULL == vma_usage_tracker);
+
+ new_val = atomic_inc_return(&vma_usage_tracker->references);
+
+ DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
+}
+
+static void ump_vma_close(struct vm_area_struct * vma)
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ _ump_uk_unmap_mem_s args;
+ int new_val;
+
+ vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ BUG_ON(NULL == vma_usage_tracker);
+
+ new_val = atomic_dec_return(&vma_usage_tracker->references);
+
+ DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
+
+ if (0 == new_val)
+ {
+ ump_memory_allocation * descriptor;
+
+ descriptor = vma_usage_tracker->descriptor;
+
+ args.ctx = descriptor->ump_session;
+ args.cookie = descriptor->cookie;
+ args.mapping = descriptor->mapping;
+ args.size = descriptor->size;
+
+ args._ukk_private = NULL; /** @note unused */
+
+ DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
+ _ump_ukk_unmap_mem( & args );
+
+ /* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
+ }
+}
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ struct vm_area_struct *vma;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
+ if (NULL == vma_usage_tracker)
+ {
+ DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _mali_osk_mem_mapregion_init\n"));
+		return _MALI_OSK_ERR_FAULT;
+ }
+
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ vma->vm_private_data = vma_usage_tracker;
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_RESERVED;
+
+ if (0==descriptor->is_cached)
+ {
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ }
+ DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));
+
+ /* Setup the functions which handle further VMA handling */
+ vma->vm_ops = &ump_vm_ops;
+
+ /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+ descriptor->mapping = (void __user*)vma->vm_start;
+
+ atomic_set(&vma_usage_tracker->references, 1); /*this can later be increased if process is forked, see ump_vma_open() */
+ vma_usage_tracker->descriptor = descriptor;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
+{
+ struct vm_area_struct* vma;
+ ump_vma_usage_tracker * vma_usage_tracker;
+
+ if (NULL == descriptor) return;
+
+ /* Linux does the right thing as part of munmap to remove the mapping
+ * All that remains is that we remove the vma_usage_tracker setup in init() */
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+ vma_usage_tracker = vma->vm_private_data;
+
+ /* We only get called if mem_mapregion_init succeeded */
+ kfree(vma_usage_tracker);
+ return;
+}
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
+{
+ struct vm_area_struct *vma;
+ _mali_osk_errcode_t retval;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+	retval = remap_pfn_range(vma, ((u32)descriptor->mapping) + offset,
+				 (*phys_addr) >> PAGE_SHIFT, size,
+				 vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+ DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
+ ump_dd_secure_id_get(descriptor->handle),
+ (unsigned long)vma,
+ (unsigned long)(vma->vm_start + offset),
+ (unsigned long)*phys_addr,
+ size,
+ (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));
+
+ return retval;
+}
+
+
+void _ump_osk_msync( ump_dd_mem * mem, ump_uk_msync_op op )
+{
+ int i;
+ DBG_MSG(3, ("Flushing nr of blocks: %u. First: paddr: 0x%08x vaddr: 0x%08x size:%dB\n", mem->nr_blocks, mem->block_array[0].addr, phys_to_virt(mem->block_array[0].addr), mem->block_array[0].size));
+
+ /* TODO: Use args->size and args->address to select a subrange of this allocation to flush */
+ for (i=0 ; i<mem->nr_blocks; i++)
+ {
+ /* TODO: Find out which flush method is best of 1)Dma OR 2)Normal flush functions */
+ /* TODO: Use args->op to select the flushing method: CLEAN_AND_INVALIDATE or CLEAN */
+ /*#define USING_DMA_FLUSH*/
+ #ifdef USING_DMA_FLUSH
+ DEBUG_ASSERT( (PAGE_SIZE==mem->block_array[i].size));
+ dma_map_page(NULL, pfn_to_page(mem->block_array[i].addr >> PAGE_SHIFT), 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
+ /*dma_unmap_page(NULL, mem->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);*/
+ #else
+ /* Normal style flush */
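+			/* dmac_flush_range() takes kernel virtual addresses, while outer_flush_range() flushes the outer (L2) cache by physical address. */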
+ ump_dd_physical_block *block;
+ u32 start_p, end_p;
+ const void *start_v, *end_v;
+ block = &mem->block_array[i];
+
+ start_p = (u32)block->addr;
+ start_v = phys_to_virt( start_p ) ;
+
+ end_p = start_p + block->size-1;
+ end_v = phys_to_virt( end_p ) ;
+
+ dmac_flush_range(start_v, end_v);
+ outer_flush_range(start_p, end_p);
+ #endif
+ }
+
+ return ;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c
new file mode 100644
index 00000000000..78569c4457b
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_osk_misc.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_misc.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+
+#include "ump_osk.h"
+
+#include <linux/kernel.h>
+#include "ump_kernel_linux.h"
+
+/* is called from ump_kernel_constructor in common code */
+_mali_osk_errcode_t _ump_osk_init( void )
+{
+ if (0 != ump_kernel_device_initialize())
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_osk_term( void )
+{
+ ump_kernel_device_terminate();
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c
new file mode 100644
index 00000000000..155635026aa
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+/*
+ * IOCTL operation; Allocate UMP memory
+ */
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_allocate_s user_interaction;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
+ return -ENOTTY;
+ }
+
+ /* Copy the user space memory to kernel space (so we safely can read it) */
+ if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
+ return -EFAULT;
+ }
+
+ user_interaction.ctx = (void *) session_data;
+
+ err = _ump_ukk_allocate( &user_interaction );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ DBG_MSG(1, ("_ump_ukk_allocate() failed in ump_ioctl_allocate()\n"));
+ return map_errcode(err);
+ }
+ user_interaction.ctx = NULL;
+
+ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+ {
+ /* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */
+ _ump_uk_release_s release_args;
+
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));
+
+ release_args.ctx = (void *) session_data;
+ release_args.secure_id = user_interaction.secure_id;
+
+ err = _ump_ukk_release( &release_args );
+ if(_MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("_ump_ukk_release() also failed when trying to release newly allocated memory in ump_ioctl_allocate()\n"));
+ }
+
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h
new file mode 100644
index 00000000000..b2bef1152c3
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_ref_wrappers.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+#ifndef __UMP_UKK_REF_WRAPPERS_H__
+#define __UMP_UKK_REF_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_REF_WRAPPERS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c
new file mode 100644
index 00000000000..d14b631246c
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
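+/*
+ * Each wrapper below follows the same pattern: copy the argument struct
+ * in from user space, attach the session data as context, call the
+ * corresponding _ump_ukk_/_ump_uku_ function, and copy any output back
+ * to user space.
+ */
+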
+/*
+ * IOCTL operation; Negotiate version of IOCTL API
+ */
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_api_version_s version_info;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_get_api_version()\n"));
+ return -ENOTTY;
+ }
+
+ /* Copy the user space memory to kernel space (so we safely can read it) */
+ if (0 != copy_from_user(&version_info, argument, sizeof(version_info)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n"));
+ return -EFAULT;
+ }
+
+ version_info.ctx = (void*) session_data;
+ err = _ump_uku_get_api_version( &version_info );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("_ump_uku_get_api_version() failed in ump_ioctl_get_api_version()\n"));
+ return map_errcode(err);
+ }
+
+ version_info.ctx = NULL;
+
+	/* Copy output data back to user space */
+ if (0 != copy_to_user(argument, &version_info, sizeof(version_info)))
+ {
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_get_api_version()\n"));
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
+
+
+/*
+ * IOCTL operation; Release reference to specified UMP memory.
+ */
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_release_s release_args;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_release()\n"));
+ return -ENOTTY;
+ }
+
+ /* Copy the user space memory to kernel space (so we safely can read it) */
+ if (0 != copy_from_user(&release_args, argument, sizeof(release_args)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n"));
+ return -EFAULT;
+ }
+
+ release_args.ctx = (void*) session_data;
+ err = _ump_ukk_release( &release_args );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("_ump_ukk_release() failed in ump_ioctl_release()\n"));
+ return map_errcode(err);
+ }
+
+ return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Return size for specified UMP memory.
+ */
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_size_get_s user_interaction;
+ _mali_osk_errcode_t err;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+ return -ENOTTY;
+ }
+
+ if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_size_get()\n"));
+ return -EFAULT;
+ }
+
+ user_interaction.ctx = (void *) session_data;
+ err = _ump_ukk_size_get( &user_interaction );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("_ump_ukk_size_get() failed in ump_ioctl_size_get()\n"));
+ return map_errcode(err);
+ }
+
+ user_interaction.ctx = NULL;
+
+ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_size_get()\n"));
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Flush CPU caches for the specified UMP memory (msync).
+ */
+int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+ _ump_uk_msync_s user_interaction;
+
+ /* Sanity check input parameters */
+ if (NULL == argument || NULL == session_data)
+ {
+ MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+ return -ENOTTY;
+ }
+
+ if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_from_user() in ump_ioctl_msync()\n"));
+ return -EFAULT;
+ }
+
+ user_interaction.ctx = (void *) session_data;
+
+ _ump_ukk_msync( &user_interaction );
+
+ user_interaction.ctx = NULL;
+
+ if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+ {
+ MSG_ERR(("copy_to_user() failed in ump_ioctl_msync()\n"));
+ return -EFAULT;
+ }
+
+ return 0; /* success */
+}
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h
new file mode 100644
index 00000000000..31afe2dca56
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/linux/ump_ukk_wrappers.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#ifndef __UMP_UKK_WRAPPERS_H__
+#define __UMP_UKK_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /* __UMP_UKK_WRAPPERS_H__ */
diff --git a/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt
new file mode 100644
index 00000000000..bf1bf61d60e
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/driver/src/devicedrv/ump/readme.txt
@@ -0,0 +1,17 @@
+Building the UMP Device Driver for Linux
+----------------------------------------
+
+Build the UMP Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> CONFIG=<your_config> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ your_config: Name of the sub-folder to find the required config.h file
+ ("arch-" will be prepended)
+
+The config.h file contains the configuration parameters needed, like the
+memory backend to use, and the amount of memory.
+
+The result will be a ump.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
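+
+Example (paths and configuration name below are illustrative):
+
+KDIR=/usr/src/linux CONFIG=ux500 make
+
+This expects a sub-folder named arch-ux500 containing config.h, and
+produces ump.ko built against the kernel tree in /usr/src/linux.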
diff --git a/drivers/gpu/mali/mali400ko/mali.spec b/drivers/gpu/mali/mali400ko/mali.spec
new file mode 100644
index 00000000000..62e8fa0e1b4
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/mali.spec
@@ -0,0 +1,57 @@
+%define kernel_target u8500
+%define kernel_version %(find /lib/modules -name "*%{kernel_target}" | cut -c 14-)
+
+Name: mali400ko
+License: GPL
+Summary: Mali400 kernel module
+Version: 1.0
+Release: 1
+URL: http://stericsson.com
+BuildRoot: %{_tmppath}/%{name}
+Requires: kernel
+BuildRequires: kernel-u8500-devel
+Requires(post): ldconfig
+Requires(postun): ldconfig
+
+Source0: mali400ko.tar.gz
+
+%description
+Mali400 kernel module.
+
+%files
+%defattr(-,root,root)
+/lib/modules/%{kernel_version}/extra/mali.ko
+/lib/modules/%{kernel_version}/extra/mali_drm.ko
+
+%prep
+%setup -q
+
+%build
+export CROSS_COMPILE=""
+export KERNELDIR=/usr/src/kernels/%{kernel_version}
+export KERNEL_BUILD_DIR=/usr/src/kernels/%{kernel_version}
+export USING_UMP=1
+export USING_HWMEM=1
+#make V=0 mali-devicedrv
+
+%install
+export CROSS_COMPILE=""
+export KERNELDIR=/usr/src/kernels/%{kernel_version}
+export KERNEL_BUILD_DIR=/usr/src/kernels/%{kernel_version}
+export USING_UMP=1
+export USING_HWMEM=1
+export INSTALL_MOD_DIR=extra
+export INSTALL_MOD_PATH=$RPM_BUILD_ROOT
+make V=0 install-mali install-mali_drm
+
+# Remove kernel modules.* files
+rm -f $RPM_BUILD_ROOT/lib/modules/%{kernel_version}/modules.*
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%post
+
+%postun
+
+
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt b/drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt
new file mode 100644
index 00000000000..47009fe7686
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/README.txt
@@ -0,0 +1,24 @@
+Notes on integrating the Mali DRM module:
+
+The Mali DRM is a platform device, meaning that you have to add an entry for it in your kernel architecture specification.
+
+Example: (arch/arm/mach-<platform>/mach-<platform>.c)
+
+#ifdef CONFIG_DRM_MALI
+static struct platform_device <platform>_device_mali_drm = {
+ .name = "mali_drm",
+ .id = -1,
+};
+#endif
+
+static struct platform_device *<platform>_devices[] __initdata = {
+...
+#ifdef CONFIG_DRM_MALI
+ &<platform>_device_mali_drm,
+#endif
+...
+};
+
+Where <platform> is substituted with the selected platform.
+
+The "mali" folder should be placed under drivers/gpu/drm/
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile
new file mode 100644
index 00000000000..0f3ace966df
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/Makefile
@@ -0,0 +1,17 @@
+#
+# * Copyright (C) 2010 ARM Limited. All rights reserved.
+# *
+# * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+# *
+# * A copy of the licence is included with the program, and can also be obtained from Free Software
+# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+#
+# Makefile for the Mali drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+obj-y += mali_drm.o
+mali_drm-objs := mali_drv.o
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD) -I$(KBUILD_EXTMOD)/include -I$(KBUILD_EXTMOD)/../drm/include/
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c
new file mode 100644
index 00000000000..571ab04e1e2
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.c
@@ -0,0 +1,158 @@
+/**
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_drv.c
+ * Implementation of the Linux device driver entrypoints for Mali DRM
+ */
+#include <linux/module.h>
+#include <linux/vermagic.h>
+#include <drm/drmP.h>
+#include "mali_drv.h"
+
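+/* The single mali_drm platform device, registered in mali_platform_drm_init() */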
+static struct platform_device *dev0;
+
+void mali_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+}
+
+void mali_drm_lastclose(struct drm_device *dev)
+{
+}
+
+static int mali_drm_suspend(struct drm_device *dev, pm_message_t state)
+{
+ return 0;
+}
+
+static int mali_drm_resume(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int mali_drm_load(struct drm_device *dev, unsigned long chipset)
+{
+ return 0;
+}
+
+static int mali_drm_unload(struct drm_device *dev)
+{
+ return 0;
+}
+
+static const struct file_operations mali_driver_fops =
+{
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+};
+
+static struct drm_driver driver =
+{
+ .load = mali_drm_load,
+ .unload = mali_drm_unload,
+ .context_dtor = NULL,
+ .reclaim_buffers = NULL,
+ .reclaim_buffers_idlelocked = NULL,
+ .preclose = mali_drm_preclose,
+ .lastclose = mali_drm_lastclose,
+ .suspend = mali_drm_suspend,
+ .resume = mali_drm_resume,
+ .ioctls = NULL,
+ .fops = &mali_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct platform_driver platform_drm_driver;
+
+int mali_drm_init(struct platform_device *dev)
+{
+ printk(KERN_INFO "Mali DRM initialize, driver name: %s, version %d.%d\n", DRIVER_NAME, DRIVER_MAJOR, DRIVER_MINOR);
+ if (dev == dev0) {
+ driver.num_ioctls = 0;
+ return drm_platform_init(&driver, dev0);
+ }
+	return 0;
+}
+
+void mali_drm_exit(struct platform_device *dev)
+{
+ if (dev0 == dev)
+ drm_platform_exit(&driver, dev);
+}
+
+static int __devinit mali_platform_drm_probe(struct platform_device *dev)
+{
+ return mali_drm_init(dev);
+}
+
+static int mali_platform_drm_remove(struct platform_device *dev)
+{
+ mali_drm_exit(dev);
+
+ return 0;
+}
+
+static int mali_platform_drm_suspend(struct platform_device *dev, pm_message_t state)
+{
+ return 0;
+}
+
+static int mali_platform_drm_resume(struct platform_device *dev)
+{
+ return 0;
+}
+
+static struct platform_driver platform_drm_driver = {
+ .probe = mali_platform_drm_probe,
+ .remove = __devexit_p(mali_platform_drm_remove),
+ .suspend = mali_platform_drm_suspend,
+ .resume = mali_platform_drm_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init mali_platform_drm_init(void)
+{
+ dev0 = platform_device_register_simple("mali_drm", 0, NULL, 0);
+ return platform_driver_register(&platform_drm_driver);
+}
+
+static void __exit mali_platform_drm_exit(void)
+{
+ platform_driver_unregister(&platform_drm_driver);
+ platform_device_unregister(dev0);
+}
+
+#ifdef MODULE
+module_init(mali_platform_drm_init);
+#else
+late_initcall(mali_platform_drm_init);
+#endif
+module_exit(mali_platform_drm_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE(DRIVER_LICENSE);
+MODULE_ALIAS(DRIVER_ALIAS);
+MODULE_INFO(vermagic, VERMAGIC_STRING);
diff --git a/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h
new file mode 100644
index 00000000000..188f427ee6d
--- /dev/null
+++ b/drivers/gpu/mali/mali400ko/x11/mali_drm/mali/mali_drv.h
@@ -0,0 +1,25 @@
+/**
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI_DRV_H_
+#define _MALI_DRV_H_
+
+#define DRIVER_AUTHOR "ARM Ltd."
+#define DRIVER_NAME "mali_drm"
+#define DRIVER_DESC "DRM module for Mali-200, Mali-400"
+#define DRIVER_LICENSE "GPL v2"
+#define DRIVER_ALIAS "platform:mali_drm"
+#define DRIVER_DATE "20101111"
+#define DRIVER_VERSION "0.2"
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 1
+
+#endif /* _MALI_DRV_H_ */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 8deedc1b984..58b2a6c93c8 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -39,6 +39,44 @@ config HWMON_DEBUG_CHIP
comment "Native drivers"
+config SENSORS_AB8500
+ tristate "AB8500 thermal monitoring"
+ depends on AB8500_GPADC
+ default n
+ help
+ If you say yes here you get support for the thermal sensor part
+ of the AB8500 chip. The driver includes thermal management for
+	  AB8500 die and two GPADC channels. The GPADC channels are preferably
+ used to access sensors outside the AB8500 chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called abx500-temp.
+
+config SENSORS_AB5500
+ tristate "AB5500 thermal monitoring"
+ depends on AB5500_GPADC
+ default n
+ help
+ If you say yes here you get support for the thermal sensor part
+ of the AB5500 chip. The driver includes thermal management for
+	  AB5500 die, PCB and RF XTAL temperatures.
+
+ This driver can also be built as a module. If so, the module
+ will be called abx500-temp.
+
+config SENSORS_DBX500
+ tristate "DBX500 thermal monitoring"
+ depends on MFD_DB8500_PRCMU || MFD_DB5500_PRCMU
+ default n
+ help
+ If you say yes here you get support for the thermal sensor part
+ of the DBX500 chip. The driver includes thermal management for
+ DBX500 die.
+
+ This driver can also be built as a module. If so, the module
+ will be called dbx500_temp.
+
config SENSORS_ABITUGURU
tristate "Abit uGuru (rev 1 & 2)"
depends on X86 && DMI && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 6d3f11f7181..1e893cbdb83 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -19,6 +19,9 @@ obj-$(CONFIG_SENSORS_W83795) += w83795.o
obj-$(CONFIG_SENSORS_W83781D) += w83781d.o
obj-$(CONFIG_SENSORS_W83791D) += w83791d.o
+obj-$(CONFIG_SENSORS_AB8500) += abx500.o ab8500.o
+obj-$(CONFIG_SENSORS_AB5500) += abx500.o ab5500.o
+obj-$(CONFIG_SENSORS_DBX500) += dbx500.o
obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o
obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
obj-$(CONFIG_SENSORS_AD7314) += ad7314.o
diff --git a/drivers/hwmon/ab5500.c b/drivers/hwmon/ab5500.c
new file mode 100644
index 00000000000..cafadeba51c
--- /dev/null
+++ b/drivers/hwmon/ab5500.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Persson <martin.persson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Note:
+ *
+ * If/when the AB5500 thermal warning temperature is reached (threshold
+ * 125C cannot be changed by SW), an interrupt is set and the driver
+ * notifies user space via a sysfs event. If a shutdown is not
+ * triggered by user space and the temperature rises beyond the
+ * critical limit (130C), pm_power_off is called.
+ *
+ * If/when AB5500 thermal shutdown temperature is reached a hardware
+ * shutdown of the AB5500 will occur.
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include "abx500.h"
+#include <asm/mach-types.h>
+
+/* AB5500 driver monitors GPADC - XTAL_TEMP, PCB_TEMP,
+ * BTEMP_BALL, BAT_CTRL and DIE_TEMP
+ */
+#define NUM_MONITORED_SENSORS 5
+
+#define SHUTDOWN_AUTO_MIN_LIMIT -25
+#define SHUTDOWN_AUTO_MAX_LIMIT 130
+
+static int ab5500_output_convert(int val, u8 sensor)
+{
+ int res = val;
+ /* GPADC returns die temperature in Celsius
+ * convert it to millidegree celsius
+ */
+ if (sensor == DIE_TEMP)
+ res = val * 1000;
+
+ return res;
+}
+
+static int ab5500_read_sensor(struct abx500_temp *data, u8 sensor)
+{
+ int val;
+ /*
+ * Special treatment for BAT_CTRL node, since this
+ * temperature measurement is more complex than just
+ * an ADC readout
+ */
+ if (sensor == BAT_CTRL)
+ val = ab5500_btemp_get_batctrl_temp(data->ab5500_btemp);
+ else
+ val = ab5500_gpadc_convert(data->ab5500_gpadc, sensor);
+
+ if (val < 0)
+ return val;
+ else
+ return ab5500_output_convert(val, sensor);
+}
+
+static ssize_t ab5500_show_name(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "ab5500\n");
+}
+
+static ssize_t ab5500_show_label(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ char *name;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int index = attr->index;
+
+ /*
+ * Make sure these labels correspond to the attribute indexes
+	 * used when calling SENSOR_DEVICE_ATTR.
+	 * Temperature sensors outside the AB5500 chip are read via GPADC.
+ */
+ switch (index) {
+ case 1:
+ name = "xtal_temp";
+ break;
+ case 2:
+ name = "pcb_temp";
+ break;
+ case 3:
+ name = "bat_temp";
+ break;
+ case 4:
+ name = "bat_ctrl";
+ break;
+ case 5:
+ name = "ab5500";
+ break;
+ default:
+ return -EINVAL;
+ }
+ return sprintf(buf, "%s\n", name);
+}
+
+static int temp_shutdown_trig(int mux)
+{
+ pm_power_off();
+ return 0;
+}
+
+static int ab5500_temp_shutdown_auto(struct abx500_temp *data)
+{
+ int ret;
+ struct adc_auto_input *auto_ip;
+
+ auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL);
+ if (!auto_ip) {
+ dev_err(&data->pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ auto_ip->mux = DIE_TEMP;
+ auto_ip->freq = MS500;
+ /*
+ * As per product specification, voltage decreases as
+ * temperature increases. Hence the min and max values
+ * should be passed in reverse order.
+ */
+ auto_ip->min = SHUTDOWN_AUTO_MAX_LIMIT;
+ auto_ip->max = SHUTDOWN_AUTO_MIN_LIMIT;
+ auto_ip->auto_adc_callback = temp_shutdown_trig;
+ data->gpadc_auto = auto_ip;
+ ret = ab5500_gpadc_convert_auto(data->ab5500_gpadc,
+ data->gpadc_auto);
+ if (ret < 0)
+ kfree(auto_ip);
+
+ return ret;
+}
+
+static int ab5500_is_visible(struct attribute *attr, int n)
+{
+ return attr->mode;
+}
+
+static int ab5500_temp_irq_handler(int irq, struct abx500_temp *data)
+{
+ /*
+ * Make sure the magic numbers below corresponds to the node
+ * used for AB5500 thermal warning from HW.
+ */
+ mutex_lock(&data->lock);
+ data->crit_alarm[4] = 1;
+ mutex_unlock(&data->lock);
+ sysfs_notify(&data->pdev->dev.kobj, NULL, "temp5_crit_alarm");
+ dev_info(&data->pdev->dev, "ABX500 thermal warning,"
+ " power off system now!\n");
+ return 0;
+}
+
+int __init ab5500_hwmon_init(struct abx500_temp *data)
+{
+ int err;
+
+ data->ab5500_gpadc = ab5500_gpadc_get("ab5500-adc.0");
+ if (IS_ERR(data->ab5500_gpadc))
+ return PTR_ERR(data->ab5500_gpadc);
+
+ data->ab5500_btemp = ab5500_btemp_get();
+ if (IS_ERR(data->ab5500_btemp))
+ return PTR_ERR(data->ab5500_btemp);
+
+ err = ab5500_temp_shutdown_auto(data);
+ if (err < 0) {
+ dev_err(&data->pdev->dev, "Failed to register"
+ " auto trigger(%d)\n", err);
+ return err;
+ }
+
+ /*
+ * Setup HW defined data.
+ *
+ * Reference hardware (HREF):
+ *
+ * XTAL_TEMP, PCB_TEMP, BTEMP_BALL refer to millivolts and
+ * BAT_CTRL and DIE_TEMP refer to millidegrees
+ *
+ * Make sure indexes correspond to the attribute indexes
+	 * used when calling SENSOR_DEVICE_ATTR
+ */
+ data->gpadc_addr[0] = XTAL_TEMP;
+ data->gpadc_addr[1] = PCB_TEMP;
+ data->gpadc_addr[2] = BTEMP_BALL;
+ data->gpadc_addr[3] = BAT_CTRL;
+ data->gpadc_addr[4] = DIE_TEMP;
+ data->monitored_sensors = NUM_MONITORED_SENSORS;
+
+ data->ops.read_sensor = ab5500_read_sensor;
+ data->ops.irq_handler = ab5500_temp_irq_handler;
+ data->ops.show_name = ab5500_show_name;
+ data->ops.show_label = ab5500_show_label;
+ data->ops.is_visible = ab5500_is_visible;
+
+ return 0;
+}
diff --git a/drivers/hwmon/ab8500.c b/drivers/hwmon/ab8500.c
new file mode 100644
index 00000000000..65a0f381d01
--- /dev/null
+++ b/drivers/hwmon/ab8500.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Persson <martin.persson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Note:
+ *
+ * If/when the AB8500 thermal warning temperature is reached (threshold
+ * cannot be changed by SW), an interrupt is set and the driver
+ * notifies user space via a sysfs event. If a shut down is not
+ * triggered by user space within a certain time frame,
+ * pm_power off is called.
+ *
+ * If/when AB8500 thermal shutdown temperature is reached a hardware
+ * shutdown of the AB8500 will occur.
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include "abx500.h"
+#include <asm/mach-types.h>
+
+#define DEFAULT_POWER_OFF_DELAY	10000	/* ms */
+
+/*
+ * The driver monitors GPADC - ADC_AUX1, ADC_AUX2, BTEMP_BALL
+ * and BAT_CTRL.
+ */
+#define NUM_MONITORED_SENSORS 4
+
+static int ab8500_read_sensor(struct abx500_temp *data, u8 sensor)
+{
+ int val;
+ /*
+ * Special treatment for the BAT_CTRL node, since this
+ * temperature measurement is more complex than just
+ * an ADC readout
+ */
+ if (sensor == BAT_CTRL)
+ val = ab8500_btemp_get_batctrl_temp(data->ab8500_btemp);
+ else
+ val = ab8500_gpadc_convert(data->ab8500_gpadc, sensor);
+
+ return val;
+}
+
+static void ab8500_thermal_power_off(struct work_struct *work)
+{
+ struct abx500_temp *data = container_of(work, struct abx500_temp,
+ power_off_work.work);
+
+ dev_warn(&data->pdev->dev, "Power off due to AB8500 thermal warning\n");
+ pm_power_off();
+}
+
+static ssize_t ab8500_show_name(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "ab8500\n");
+}
+
+static ssize_t ab8500_show_label(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ char *name;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int index = attr->index;
+
+ /*
+ * Make sure these labels correspond to the attribute indexes
+	 * used when calling SENSOR_DEVICE_ATTR.
+ * Temperature sensors outside ab8500 (read via GPADC) are marked
+ * with prefix ext_
+ */
+ switch (index) {
+ case 1:
+ name = "ext_rtc_xtal";
+ break;
+ case 2:
+ name = "ext_db8500";
+ break;
+ case 3:
+ name = "bat_temp";
+ break;
+ case 4:
+ name = "bat_ctrl";
+ break;
+ case 5:
+ name = "ab8500";
+ break;
+ default:
+ return -EINVAL;
+ }
+ return sprintf(buf, "%s\n", name);
+}
+
+static int ab8500_is_visible(struct attribute *attr, int n)
+{
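+	/*
+	 * Index 5 is the AB8500 die temperature, which cannot be read on HREF
+	 * hardware (see ab8500_hwmon_init below); hide its input and threshold
+	 * nodes so that only the crit alarm remains visible.
+	 */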
+ if (!strcmp(attr->name, "temp5_input") ||
+ !strcmp(attr->name, "temp5_min") ||
+ !strcmp(attr->name, "temp5_max") ||
+ !strcmp(attr->name, "temp5_max_hyst") ||
+ !strcmp(attr->name, "temp5_min_alarm") ||
+ !strcmp(attr->name, "temp5_max_alarm") ||
+ !strcmp(attr->name, "temp5_max_hyst_alarm"))
+ return 0;
+
+ return attr->mode;
+}
+
+static int ab8500_temp_irq_handler(int irq, struct abx500_temp *data)
+{
+ unsigned long delay_in_jiffies;
+ /*
+ * Make sure the magic numbers below corresponds to the node
+ * used for AB8500 thermal warning from HW.
+ */
+ mutex_lock(&data->lock);
+ data->crit_alarm[4] = 1;
+ mutex_unlock(&data->lock);
+
+ hwmon_notify(data->crit_alarm[4], NULL);
+ sysfs_notify(&data->pdev->dev.kobj, NULL, "temp5_crit_alarm");
+ dev_info(&data->pdev->dev, "AB8500 thermal warning,"
+ " power off in %lu s\n", data->power_off_delay);
+ delay_in_jiffies = msecs_to_jiffies(data->power_off_delay);
+ schedule_delayed_work(&data->power_off_work, delay_in_jiffies);
+ return 0;
+}
+
+int __init ab8500_hwmon_init(struct abx500_temp *data)
+{
+ data->ab8500_gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ if (IS_ERR(data->ab8500_gpadc))
+ return PTR_ERR(data->ab8500_gpadc);
+
+ data->ab8500_btemp = ab8500_btemp_get();
+ if (IS_ERR(data->ab8500_btemp))
+ return PTR_ERR(data->ab8500_btemp);
+
+ INIT_DELAYED_WORK(&data->power_off_work, ab8500_thermal_power_off);
+
+ /*
+ * Setup HW defined data.
+ *
+ * Reference hardware (HREF):
+ *
+ * GPADC - ADC_AUX1, connected to NTC R2148 next to RTC_XTAL on HREF
+ * GPADC - ADC_AUX2, connected to NTC R2150 near DB8500 on HREF
+ * Hence, temp#_min/max/max_hyst refer to millivolts and not
+ * millidegrees
+ * This is not the case for BAT_CTRL where millidegrees is used
+ *
+ * HREF HW does not support reading AB8500 temperature. BUT an
+ * AB8500 IRQ will be launched if die crit temp limit is reached.
+ *
+ * Make sure indexes correspond to the attribute indexes
+	 * used when calling SENSOR_DEVICE_ATTR
+ */
+ data->gpadc_addr[0] = ADC_AUX1;
+ data->gpadc_addr[1] = ADC_AUX2;
+ data->gpadc_addr[2] = BTEMP_BALL;
+ data->gpadc_addr[3] = BAT_CTRL;
+ data->gpadc_addr[4] = DIE_TEMP;
+ data->power_off_delay = DEFAULT_POWER_OFF_DELAY;
+ data->monitored_sensors = NUM_MONITORED_SENSORS;
+
+ data->ops.read_sensor = ab8500_read_sensor;
+ data->ops.irq_handler = ab8500_temp_irq_handler;
+ data->ops.show_name = ab8500_show_name;
+ data->ops.show_label = ab8500_show_label;
+ data->ops.is_visible = ab8500_is_visible;
+
+ return 0;
+}
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
new file mode 100644
index 00000000000..7aa9994c54a
--- /dev/null
+++ b/drivers/hwmon/abx500.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Persson <martin.persson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Note:
+ *
+ * ABX500 does not provide an auto ADC, so to monitor the required
+ * temperatures, a periodic work item is used. It is more important
+ * not to wake up the CPU than to perform this job, hence the use
+ * of a deferred delay.
+ *
+ * A deferred delay for the thermal monitor is considered safe because:
+ * If the chip gets too hot during a sleep state it's most likely
+ * due to external factors, such as the surrounding temperature.
+ * I.e. no SW decisions will make any difference.
+ *
+ * If/when the ABX500 thermal warning temperature is reached (threshold
+ * cannot be changed by SW), an interrupt is set and the driver
+ * notifies user space via a sysfs event.
+ *
+ * If/when ABX500 thermal shutdown temperature is reached a hardware
+ * shutdown of the ABX500 will occur.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <asm/mach-types.h>
+
+#include "abx500.h"
+
+#define DEFAULT_MONITOR_DELAY	1000	/* ms */
+
+/*
+ * Thresholds are considered inactive if set to 0.
+ * To avoid confusion for user space applications,
+ * the temp monitor delay is set to 0 if all thresholds
+ * are 0.
+ */
+static bool find_active_thresholds(struct abx500_temp *data)
+{
+ int i;
+ for (i = 0; i < data->monitored_sensors; i++)
+ if (data->max[i] != 0 || data->max_hyst[i] != 0
+ || data->min[i] != 0)
+ return true;
+
+ dev_dbg(&data->pdev->dev, "No active thresholds,"
+ "cancel deferred job (if it exists)"
+ "and reset temp monitor delay\n");
+ cancel_delayed_work_sync(&data->work);
+ return false;
+}
+
+static inline void schedule_monitor(struct abx500_temp *data)
+{
+ unsigned long delay_in_jiffies;
+ delay_in_jiffies = msecs_to_jiffies(data->gpadc_monitor_delay);
+ schedule_delayed_work(&data->work, delay_in_jiffies);
+}
+
+static inline void gpadc_monitor_exit(struct abx500_temp *data)
+{
+ cancel_delayed_work_sync(&data->work);
+}
+
+static void gpadc_monitor(struct work_struct *work)
+{
+ unsigned long delay_in_jiffies;
+ int val, i, ret;
+ /* Container for alarm node name */
+ char alarm_node[30];
+
+ bool updated_min_alarm = false;
+ bool updated_max_alarm = false;
+ bool updated_max_hyst_alarm = false;
+ struct abx500_temp *data = container_of(work, struct abx500_temp,
+ work.work);
+
+ for (i = 0; i < data->monitored_sensors; i++) {
+ /* Thresholds are considered inactive if set to 0 */
+ if (data->max[i] == 0 && data->max_hyst[i] == 0
+ && data->min[i] == 0)
+ continue;
+
+ val = data->ops.read_sensor(data, data->gpadc_addr[i]);
+ if (val < 0) {
+ dev_err(&data->pdev->dev, "GPADC read failed\n");
+ continue;
+ }
+
+ mutex_lock(&data->lock);
+ if (data->min[i] != 0) {
+ if (val < data->min[i]) {
+ if (data->min_alarm[i] == 0) {
+ data->min_alarm[i] = 1;
+ updated_min_alarm = true;
+ }
+ } else {
+ if (data->min_alarm[i] == 1) {
+ data->min_alarm[i] = 0;
+ updated_min_alarm = true;
+ }
+ }
+
+ }
+ if (data->max[i] != 0) {
+ if (val > data->max[i]) {
+ if (data->max_alarm[i] == 0) {
+ data->max_alarm[i] = 1;
+ updated_max_alarm = true;
+ }
+ } else {
+ if (data->max_alarm[i] == 1) {
+ data->max_alarm[i] = 0;
+ updated_max_alarm = true;
+ }
+ }
+
+ }
+ if (data->max_hyst[i] != 0) {
+ if (val > data->max_hyst[i]) {
+ if (data->max_hyst_alarm[i] == 0) {
+ data->max_hyst_alarm[i] = 1;
+ updated_max_hyst_alarm = true;
+ }
+ } else {
+ if (data->max_hyst_alarm[i] == 1) {
+ data->max_hyst_alarm[i] = 0;
+ updated_max_hyst_alarm = true;
+ }
+ }
+ }
+ mutex_unlock(&data->lock);
+
+ /* hwmon attr index starts at 1, thus "i+1" below */
+ if (updated_min_alarm) {
+ ret = snprintf(alarm_node, 16, "temp%d_min_alarm",
+ (i + 1));
+ if (ret < 0) {
+ dev_err(&data->pdev->dev,
+ "Unable to update alarm node (%d)",
+ ret);
+ break;
+ }
+ sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
+ }
+ if (updated_max_alarm) {
+ ret = snprintf(alarm_node, 16, "temp%d_max_alarm",
+ (i + 1));
+ if (ret < 0) {
+ dev_err(&data->pdev->dev,
+ "Unable to update alarm node (%d)",
+ ret);
+ break;
+ }
+ hwmon_notify(data->max_alarm[i], NULL);
+ sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
+ }
+ if (updated_max_hyst_alarm) {
+ ret = snprintf(alarm_node, 21, "temp%d_max_hyst_alarm",
+ (i + 1));
+ if (ret < 0) {
+ dev_err(&data->pdev->dev,
+ "Unable to update alarm node (%d)",
+ ret);
+ break;
+ }
+ sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
+ }
+ }
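+ /*
+ * Re-arm the deferred monitor job only while at least one
+ * threshold is still active.
+ */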
+ if (find_active_thresholds(data))
+ schedule_monitor(data);
+}
+
+static ssize_t set_temp_monitor_delay(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int res;
+ unsigned long delay_in_s;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+
+ res = strict_strtoul(buf, 10, &delay_in_s);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
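+ /* The sysfs node takes seconds; the delay is stored internally in ms. */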
+ data->gpadc_monitor_delay = delay_in_s * 1000;
+
+ if (find_active_thresholds(data))
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_temp_power_off_delay(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int res;
+ unsigned long delay_in_s;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+
+ res = strict_strtoul(buf, 10, &delay_in_s);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ data->power_off_delay = delay_in_s * 1000;
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t show_temp_monitor_delay(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ /* return time in s, not ms */
+ return sprintf(buf, "%lu\n", (data->gpadc_monitor_delay) / 1000);
+}
+
+static ssize_t show_temp_power_off_delay(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ /* return time in s, not ms */
+ return sprintf(buf, "%lu\n", (data->power_off_delay) / 1000);
+}
+
+/* HWMON sysfs interface */
+static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ /*
+ * To avoid confusion between sensor label and chip name, the function
+ * "show_label" is not used to return the chip name.
+ */
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ return data->ops.show_name(dev, devattr, buf);
+}
+
+static ssize_t show_label(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ return data->ops.show_label(dev, devattr, buf);
+}
+
+static ssize_t show_input(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ u8 gpadc_addr = data->gpadc_addr[attr->index - 1];
+
+ val = data->ops.read_sensor(data, gpadc_addr);
+ if (val < 0) {
+ dev_err(&data->pdev->dev, "GPADC read failed\n");
+ return val;
+ }
+
+ return sprintf(buf, "%d\n", val);
+}
+
+/* set functions (RW nodes) */
+static ssize_t set_min(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ /*
+ * Threshold is considered inactive if set to 0
+ * hwmon attr index starts at 1, thus "attr->index-1" below
+ */
+ if (val == 0)
+ data->min_alarm[attr->index - 1] = 0;
+
+ data->min[attr->index - 1] = val;
+
+ if (val == 0)
+ (void) find_active_thresholds(data);
+ else
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ /*
+ * Threshold is considered inactive if set to 0
+ * hwmon attr index starts at 1, thus "attr->index-1" below
+ */
+ if (val == 0)
+ data->max_alarm[attr->index - 1] = 0;
+
+ data->max[attr->index - 1] = val;
+
+ if (val == 0)
+ (void) find_active_thresholds(data);
+ else
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_max_hyst(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ /*
+ * Threshold is considered inactive if set to 0
+ * hwmon attr index starts at 1, thus "attr->index-1" below
+ */
+ if (val == 0)
+ data->max_hyst_alarm[attr->index - 1] = 0;
+
+ data->max_hyst[attr->index - 1] = val;
+
+ if (val == 0)
+ (void) find_active_thresholds(data);
+ else
+ schedule_monitor(data);
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+/*
+ * show functions (RO nodes)
+ */
+static ssize_t show_min(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->min[attr->index - 1]);
+}
+
+static ssize_t show_max(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max[attr->index - 1]);
+}
+
+static ssize_t show_max_hyst(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max_hyst[attr->index - 1]);
+}
+
+/* Alarms */
+static ssize_t show_min_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->min_alarm[attr->index - 1]);
+}
+
+static ssize_t show_max_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max_alarm[attr->index - 1]);
+}
+
+static ssize_t show_max_hyst_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->max_hyst_alarm[attr->index - 1]);
+}
+
+static ssize_t show_crit_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%ld\n", data->crit_alarm[attr->index - 1]);
+}
+
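+/*
+ * Attribute visibility is delegated to the chip specific ops so that,
+ * for example, temp5_crit_alarm can be exposed only on chips whose
+ * sensor actually provides a critical alarm.
+ */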
+static mode_t abx500_attrs_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct abx500_temp *data = dev_get_drvdata(dev);
+ return data->ops.is_visible(a, n);
+}
+
+static SENSOR_DEVICE_ATTR(temp_monitor_delay, S_IRUGO | S_IWUSR,
+ show_temp_monitor_delay, set_temp_monitor_delay, 0);
+static SENSOR_DEVICE_ATTR(temp_power_off_delay, S_IRUGO | S_IWUSR,
+ show_temp_power_off_delay,
+ set_temp_power_off_delay, 0);
+
+/* Chip name, required by hwmon */
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+
+/* GPADC - SENSOR1 */
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_min, set_min, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_max, set_max, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 1);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_min_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_max_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 1);
+
+/* GPADC - SENSOR2 */
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min, set_min, 2);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max, set_max, 2);
+static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 2);
+static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_min_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_max_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 2);
+
+/* GPADC - SENSOR3 */
+static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, show_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_input, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min, set_min, 3);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max, set_max, 3);
+static SENSOR_DEVICE_ATTR(temp3_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 3);
+static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_min_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_max_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 3);
+
+/* GPADC - SENSOR4 */
+static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, show_label, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_input, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_min, S_IWUSR | S_IRUGO, show_min, set_min, 4);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_max, set_max, 4);
+static SENSOR_DEVICE_ATTR(temp4_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 4);
+static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_min_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_max_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 4);
+
+/* GPADC - SENSOR5 */
+static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO, show_label, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_input, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_min, S_IWUSR | S_IRUGO, show_min, set_min, 5);
+static SENSOR_DEVICE_ATTR(temp5_max, S_IWUSR | S_IRUGO, show_max, set_max, 5);
+static SENSOR_DEVICE_ATTR(temp5_max_hyst, S_IWUSR | S_IRUGO,
+ show_max_hyst, set_max_hyst, 5);
+static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_min_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_max_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_max_hyst_alarm, S_IRUGO,
+ show_max_hyst_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO,
+ show_crit_alarm, NULL, 5);
+
+struct attribute *abx500_temp_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_temp_monitor_delay.dev_attr.attr,
+ &sensor_dev_attr_temp_power_off_delay.dev_attr.attr,
+ /* GPADC SENSOR1 */
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR2 */
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR3 */
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR4 */
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_min.dev_attr.attr,
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_hyst_alarm.dev_attr.attr,
+ /* GPADC SENSOR5*/
+ &sensor_dev_attr_temp5_label.dev_attr.attr,
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+ &sensor_dev_attr_temp5_min.dev_attr.attr,
+ &sensor_dev_attr_temp5_max.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp5_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_hyst_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group abx500_temp_group = {
+ .attrs = abx500_temp_attributes,
+ .is_visible = abx500_attrs_visible,
+};
+
+static irqreturn_t abx500_temp_irq_handler(int irq, void *irq_data)
+{
+ struct platform_device *pdev = irq_data;
+ struct abx500_temp *data = platform_get_drvdata(pdev);
+ data->ops.irq_handler(irq, data);
+ return IRQ_HANDLED;
+}
+
+static int setup_irqs(struct platform_device *pdev)
+{
+ int ret;
+ int irq = platform_get_irq_byname(pdev, "ABX500_TEMP_WARM");
+
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Get irq by name failed\n");
+ return irq;
+ }
+
+ ret = request_threaded_irq(irq, NULL, abx500_temp_irq_handler,
+ IRQF_NO_SUSPEND, "abx500-temp", pdev);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int __devinit abx500_temp_probe(struct platform_device *pdev)
+{
+ struct abx500_temp *data;
+ int err;
+
+ data = kzalloc(sizeof(struct abx500_temp), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+ mutex_init(&data->lock);
+
+ /* Chip specific initialization */
+ if (machine_is_u5500())
+ err = ab5500_hwmon_init(data);
+ else
+ err = ab8500_hwmon_init(data);
+ if (err < 0) {
+ dev_err(&pdev->dev, "abx500 init failed");
+ goto exit;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
+ goto exit;
+ }
+
+ INIT_DELAYED_WORK_DEFERRABLE(&data->work, gpadc_monitor);
+ data->gpadc_monitor_delay = DEFAULT_MONITOR_DELAY;
+
+ platform_set_drvdata(pdev, data);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &abx500_temp_group);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err);
+ goto exit_platform_data;
+ }
+
+ err = setup_irqs(pdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "irq setup failed (%d)\n", err);
+ goto exit_sysfs_group;
+ }
+ return 0;
+
+exit_sysfs_group:
+ sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
+exit_platform_data:
+ hwmon_device_unregister(data->hwmon_dev);
+ platform_set_drvdata(pdev, NULL);
+exit:
+ kfree(data->gpadc_auto);
+ kfree(data);
+ return err;
+}
+
+static int __devexit abx500_temp_remove(struct platform_device *pdev)
+{
+ struct abx500_temp *data = platform_get_drvdata(pdev);
+
+ gpadc_monitor_exit(data);
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
+ platform_set_drvdata(pdev, NULL);
+ kfree(data->gpadc_auto);
+ kfree(data);
+ return 0;
+}
+
+/* No action required in suspend/resume, thus the lack of functions */
+static struct platform_driver abx500_temp_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "abx500-temp",
+ },
+ .probe = abx500_temp_probe,
+ .remove = __devexit_p(abx500_temp_remove),
+};
+
+static int __init abx500_temp_init(void)
+{
+ return platform_driver_register(&abx500_temp_driver);
+}
+
+static void __exit abx500_temp_exit(void)
+{
+ platform_driver_unregister(&abx500_temp_driver);
+}
+
+MODULE_AUTHOR("Martin Persson <martin.persson@stericsson.com>");
+MODULE_DESCRIPTION("ABX500 temperature driver");
+MODULE_LICENSE("GPL");
+
+module_init(abx500_temp_init);
+module_exit(abx500_temp_exit);
diff --git a/drivers/hwmon/abx500.h b/drivers/hwmon/abx500.h
new file mode 100644
index 00000000000..9fe28dac28f
--- /dev/null
+++ b/drivers/hwmon/abx500.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * License terms: GNU General Public License v2
+ * Author: Martin Persson <martin.persson@stericsson.com>
+ */
+
+#ifndef _ABX500_H
+#define _ABX500_H
+
+#define NUM_SENSORS 5
+
+struct ab8500_gpadc;
+struct ab5500_gpadc;
+struct ab8500_btemp;
+struct ab5500_btemp;
+struct adc_auto_input;
+struct abx500_temp;
+
+/**
+ * struct abx500_temp_ops - abx500 chip specific ops
+ * @read_sensor: reads gpadc output
+ * @irq_handler: irq handler
+ * @show_name: hwmon device name
+ * @show_label: hwmon attribute label
+ * @is_visible: is attribute visible
+ */
+struct abx500_temp_ops {
+ int (*read_sensor)(struct abx500_temp *, u8);
+ int (*irq_handler)(int, struct abx500_temp *);
+ ssize_t (*show_name)(struct device *,
+ struct device_attribute *, char *);
+ ssize_t (*show_label) (struct device *,
+ struct device_attribute *, char *);
+ int (*is_visible)(struct attribute *, int);
+};
+
+/**
+ * struct abx500_temp - representation of temp mon device
+ * @pdev: platform device
+ * @hwmon_dev: hwmon device
+ * @ab8500_gpadc: gpadc interface for ab8500
+ * @ab5500_gpadc: gpadc interface for ab5500
+ * @btemp: battery temperature interface for ab8500
+ * @adc_auto_input: gpadc auto trigger
+ * @gpadc_addr: gpadc channel address
+ * @temp: sensor temperature input value
+ * @min: sensor temperature min value
+ * @max: sensor temperature max value
+ * @max_hyst: sensor temperature hysteresis value for max limit
+ * @crit: sensor temperature critical value
+ * @min_alarm: sensor temperature min alarm
+ * @max_alarm: sensor temperature max alarm
+ * @max_hyst_alarm: sensor temperature hysteresis alarm
+ * @crit_alarm: sensor temperature critical value alarm
+ * @work: delayed work scheduled to monitor temperature periodically
+ * @power_off_work: delayed work scheduled to power off the system
+ * when critical temperature is reached
+ * @lock: mutex
+ * @gpadc_monitor_delay: delay between temperature readings in ms
+ * @power_off_delay: delay before power off in ms
+ * @monitored_sensors: number of monitored sensors
+ */
+struct abx500_temp {
+ struct platform_device *pdev;
+ struct device *hwmon_dev;
+ struct ab8500_gpadc *ab8500_gpadc;
+ struct ab5500_gpadc *ab5500_gpadc;
+ struct ab8500_btemp *ab8500_btemp;
+ struct ab5500_btemp *ab5500_btemp;
+ struct adc_auto_input *gpadc_auto;
+ struct abx500_temp_ops ops;
+ u8 gpadc_addr[NUM_SENSORS];
+ unsigned long temp[NUM_SENSORS];
+ unsigned long min[NUM_SENSORS];
+ unsigned long max[NUM_SENSORS];
+ unsigned long max_hyst[NUM_SENSORS];
+ unsigned long crit[NUM_SENSORS];
+ unsigned long min_alarm[NUM_SENSORS];
+ unsigned long max_alarm[NUM_SENSORS];
+ unsigned long max_hyst_alarm[NUM_SENSORS];
+ unsigned long crit_alarm[NUM_SENSORS];
+ struct delayed_work work;
+ struct delayed_work power_off_work;
+ struct mutex lock;
+ /* Delay (ms) between temperature readings */
+ unsigned long gpadc_monitor_delay;
+ /* Delay (ms) before power off */
+ unsigned long power_off_delay;
+ int monitored_sensors;
+};
+
+int ab8500_hwmon_init(struct abx500_temp *data) __init;
+int ab5500_hwmon_init(struct abx500_temp *data) __init;
+
+#endif /* _ABX500_H */
diff --git a/drivers/hwmon/dbx500.c b/drivers/hwmon/dbx500.c
new file mode 100644
index 00000000000..c034b48f8dd
--- /dev/null
+++ b/drivers/hwmon/dbx500.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010. All rights reserved.
+ * This code is ST-Ericsson proprietary and confidential.
+ * Any use of the code for whatever purpose is subject to
+ * specific written permission of ST-Ericsson SA.
+ *
+ * Author: WenHai Fang <wenhai.h.fang@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+
+/*
+ * Default measure period to 0xFF x cycle32k
+ */
+#define DEFAULT_MEASURE_TIME 0xFF
+
+/*
+ * Default critical sensor temperature (degrees)
+ */
+#define DEFAULT_CRITICAL_TEMP 85
+
+/* This driver monitors DB thermal */
+#define NUM_SENSORS 1
+
+struct dbx500_temp {
+ struct platform_device *pdev;
+ struct device *hwmon_dev;
+ unsigned char min[NUM_SENSORS];
+ unsigned char max[NUM_SENSORS];
+ unsigned char crit[NUM_SENSORS];
+ unsigned char min_alarm[NUM_SENSORS];
+ unsigned char max_alarm[NUM_SENSORS];
+ unsigned short measure_time;
+ bool monitoring_active;
+ struct mutex lock;
+};
+
+static inline void start_temp_monitoring(struct dbx500_temp *data,
+ const int index)
+{
+ unsigned int i;
+
+ /* determine if there are any sensors worth monitoring */
+ for (i = 0; i < NUM_SENSORS; i++)
+ if (data->min[i] || data->max[i])
+ goto start_monitoring;
+
+ return;
+
+start_monitoring:
+ /* kick off the monitor job */
+ data->min_alarm[index] = 0;
+ data->max_alarm[index] = 0;
+
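+ /*
+ * The PRCMU firmware samples the temperature at the configured
+ * measure period; the host is only interrupted when a hotmon
+ * threshold is crossed.
+ */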
+ (void) prcmu_start_temp_sense(data->measure_time);
+ data->monitoring_active = true;
+}
+
+static inline void stop_temp_monitoring(struct dbx500_temp *data)
+{
+ if (data->monitoring_active) {
+ (void) prcmu_stop_temp_sense();
+ data->monitoring_active = false;
+ }
+}
+
+/* HWMON sysfs interface */
+static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "dbx500\n");
+}
+
+static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ return show_name(dev, devattr, buf);
+}
+
+/* set functions (RW nodes) */
+static ssize_t set_min(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
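+ /* Hotmon thresholds are 8-bit values; clamp so that min <= max. */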
+ val &= 0xFF;
+ if (val > data->max[attr->index - 1])
+ val = data->max[attr->index - 1];
+
+ data->min[attr->index - 1] = val;
+
+ stop_temp_monitoring(data);
+
+ (void) prcmu_config_hotmon(data->min[attr->index - 1],
+ data->max[attr->index - 1]);
+
+ start_temp_monitoring(data, (attr->index - 1));
+
+ mutex_unlock(&data->lock);
+ return count;
+}
+
+static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
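+ /* Hotmon thresholds are 8-bit values; clamp so that max >= min. */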
+ val &= 0xFF;
+ if (val < data->min[attr->index - 1])
+ val = data->min[attr->index - 1];
+
+ data->max[attr->index - 1] = val;
+
+ stop_temp_monitoring(data);
+
+ (void) prcmu_config_hotmon(data->min[attr->index - 1],
+ data->max[attr->index - 1]);
+
+ start_temp_monitoring(data, (attr->index - 1));
+
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t set_crit(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int res = strict_strtoul(buf, 10, &val);
+ if (res < 0)
+ return res;
+
+ mutex_lock(&data->lock);
+ val &= 0xFF;
+ data->crit[attr->index - 1] = val;
+ (void) prcmu_config_hotdog(data->crit[attr->index - 1]);
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+/*
+ * show functions (RO nodes)
+ * Notice that min/max/crit refer to degrees
+ */
+static ssize_t show_min(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->min[attr->index - 1]);
+}
+
+static ssize_t show_max(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->max[attr->index - 1]);
+}
+
+static ssize_t show_crit(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->crit[attr->index - 1]);
+}
+
+/* Alarms */
+static ssize_t show_min_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->min_alarm[attr->index - 1]);
+}
+
+static ssize_t show_max_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct dbx500_temp *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ /* hwmon attr index starts at 1, thus "attr->index-1" below */
+ return sprintf(buf, "%d\n", data->max_alarm[attr->index - 1]);
+}
+
+/* Chip name, required by hwmon */
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_min, set_min, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_max, set_max, 1);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
+ show_crit, set_crit, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_min_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_max_alarm, NULL, 1);
+
+static struct attribute *dbx500_temp_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dbx500_temp_group = {
+ .attrs = dbx500_temp_attributes,
+};
+
+static irqreturn_t prcmu_hotmon_low_irq_handler(int irq, void *irq_data)
+{
+ struct platform_device *pdev = irq_data;
+ struct dbx500_temp *data = platform_get_drvdata(pdev);
+
+ mutex_lock(&data->lock);
+ data->min_alarm[0] = 1;
+ mutex_unlock(&data->lock);
+
+ sysfs_notify(&pdev->dev.kobj, NULL, "temp1_min_alarm");
+ dev_dbg(&pdev->dev, "DBX500 thermal low warning\n");
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prcmu_hotmon_high_irq_handler(int irq, void *irq_data)
+{
+ struct platform_device *pdev = irq_data;
+ struct dbx500_temp *data = platform_get_drvdata(pdev);
+
+ mutex_lock(&data->lock);
+ data->max_alarm[0] = 1;
+ mutex_unlock(&data->lock);
+
+ hwmon_notify(data->max_alarm[0], NULL);
+ sysfs_notify(&pdev->dev.kobj, NULL, "temp1_max_alarm");
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit dbx500_temp_probe(struct platform_device *pdev)
+{
+ struct dbx500_temp *data;
+ int err = 0, i;
+ int irq;
+
+ dev_dbg(&pdev->dev, "dbx500_temp: Function dbx500_temp_probe.\n");
+
+ data = kzalloc(sizeof(struct dbx500_temp), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Get IRQ_HOTMON_LOW failed\n");
+ err = irq;
+ goto exit;
+ }
+
+ err = request_threaded_irq(irq, NULL,
+ prcmu_hotmon_low_irq_handler,
+ IRQF_NO_SUSPEND,
+ "dbx500_temp_low", pdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "dbx500: Failed to allocate HOTMON_LOW\n");
+ goto exit;
+ }
+ dev_dbg(&pdev->dev, "dbx500: Allocated HOTMON_LOW\n");
+
+ irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Get IRQ_HOTMON_HIGH failed\n");
+ err = irq;
+ goto exit;
+ }
+
+ err = request_threaded_irq(irq, NULL,
+ prcmu_hotmon_high_irq_handler,
+ IRQF_NO_SUSPEND,
+ "dbx500_temp_high", pdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "dbx500: Failed to allocate HOTMON_HIGH\n");
+ goto exit;
+ }
+ dev_dbg(&pdev->dev, "dbx500: Allocated HOTMON_HIGH\n");
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
+ goto exit;
+ }
+
+ for (i = 0; i < NUM_SENSORS; i++) {
+ data->min[i] = 0;
+ data->max[i] = 0;
+ data->crit[i] = DEFAULT_CRITICAL_TEMP;
+ data->min_alarm[i] = 0;
+ data->max_alarm[i] = 0;
+ }
+
+ mutex_init(&data->lock);
+
+ data->pdev = pdev;
+ data->measure_time = DEFAULT_MEASURE_TIME;
+ data->monitoring_active = false;
+
+ /* set PRCMU to disable platform when we get to the critical temp */
+ (void) prcmu_config_hotdog(DEFAULT_CRITICAL_TEMP);
+
+ platform_set_drvdata(pdev, data);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &dbx500_temp_group);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err);
+ goto exit_platform_data;
+ }
+
+ return 0;
+
+exit_platform_data:
+ platform_set_drvdata(pdev, NULL);
+exit:
+ kfree(data);
+ return err;
+}
+
+static int __devexit dbx500_temp_remove(struct platform_device *pdev)
+{
+ struct dbx500_temp *data = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &dbx500_temp_group);
+ platform_set_drvdata(pdev, NULL);
+ kfree(data);
+ return 0;
+}
+
+/* No action required in suspend/resume, thus the lack of functions */
+static struct platform_driver dbx500_temp_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "dbx500_temp",
+ },
+ .probe = dbx500_temp_probe,
+ .remove = __devexit_p(dbx500_temp_remove),
+};
+
+static int __init dbx500_temp_init(void)
+{
+ return platform_driver_register(&dbx500_temp_driver);
+}
+
+static void __exit dbx500_temp_exit(void)
+{
+ platform_driver_unregister(&dbx500_temp_driver);
+}
+
+MODULE_AUTHOR("WenHai Fang <wenhai.h.fang@stericsson.com>");
+MODULE_DESCRIPTION("DBX500 temperature driver");
+MODULE_LICENSE("GPL");
+
+module_init(dbx500_temp_init);
+module_exit(dbx500_temp_exit);
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c3c471ca202..8957bbac7a7 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -21,6 +21,7 @@
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/notifier.h>
#define HWMON_ID_PREFIX "hwmon"
#define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
@@ -29,6 +30,8 @@ static struct class *hwmon_class;
static DEFINE_IDA(hwmon_ida);
+static BLOCKING_NOTIFIER_HEAD(hwmon_notifier_list);
+
/**
* hwmon_device_register - register w/ hwmon
* @dev: the device to register
@@ -75,6 +78,24 @@ void hwmon_device_unregister(struct device *dev)
}
EXPORT_SYMBOL_GPL(hwmon_device_unregister);
+int hwmon_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&hwmon_notifier_list, nb);
+}
+EXPORT_SYMBOL(hwmon_notifier_register);
+
+int hwmon_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&hwmon_notifier_list, nb);
+}
+EXPORT_SYMBOL(hwmon_notifier_unregister);
+
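+/*
+ * hwmon_notify - broadcast a hwmon event to registered listeners
+ * @val: event value, e.g. an alarm state
+ * @v: optional opaque payload passed along the notifier chain
+ */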
+void hwmon_notify(unsigned long val, void *v)
+{
+ blocking_notifier_call_chain(&hwmon_notifier_list, val, v);
+}
+EXPORT_SYMBOL(hwmon_notify);
+
static void __init hwmon_pci_quirks(void)
{
#if defined CONFIG_X86 && defined CONFIG_PCI
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 5267ab93d55..9ddf2c97d26 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -431,7 +431,7 @@ static int read_i2c(struct nmk_i2c_dev *dev)
if (timeout == 0) {
/* Controller timed out */
- dev_err(&dev->pdev->dev, "read from slave 0x%x timed out\n",
+ dev_err(&dev->pdev->dev, "Read from Slave 0x%x timed out\n",
dev->cli.slave_adr);
status = -ETIMEDOUT;
}
@@ -518,7 +518,7 @@ static int write_i2c(struct nmk_i2c_dev *dev)
if (timeout == 0) {
/* Controller timed out */
- dev_err(&dev->pdev->dev, "write to slave 0x%x timed out\n",
+ dev_err(&dev->pdev->dev, "Write to slave 0x%x timed out\n",
dev->cli.slave_adr);
status = -ETIMEDOUT;
}
@@ -628,12 +628,8 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
dev->busy = true;
- if (dev->regulator)
- regulator_enable(dev->regulator);
pm_runtime_get_sync(&dev->pdev->dev);
- clk_enable(dev->clk);
-
status = init_hw(dev);
if (status)
goto out;
@@ -666,10 +662,8 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
}
out:
- clk_disable(dev->clk);
- pm_runtime_put_sync(&dev->pdev->dev);
- if (dev->regulator)
- regulator_disable(dev->regulator);
+
+ pm_runtime_put(&dev->pdev->dev);
dev->busy = false;
@@ -859,9 +853,9 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
#ifdef CONFIG_PM
-static int nmk_i2c_suspend(struct device *dev)
+
+static int nmk_i2c_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct platform_device *pdev = to_platform_device(dev);
struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev);
if (nmk_i2c->busy)
@@ -870,23 +864,53 @@ static int nmk_i2c_suspend(struct device *dev)
return 0;
}
-static int nmk_i2c_resume(struct device *dev)
+static int nmk_i2c_suspend_noirq(struct device *dev)
{
+ struct nmk_i2c_dev *nmk_i2c =
+ platform_get_drvdata(to_platform_device(dev));
+
+ if (nmk_i2c->busy)
+ return -EBUSY;
+
return 0;
}
+
#else
#define nmk_i2c_suspend NULL
-#define nmk_i2c_resume NULL
+#define nmk_i2c_suspend_noirq NULL
#endif
+static int nmk_i2c_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev);
+
+ clk_disable(nmk_i2c->clk);
+ if (nmk_i2c->regulator)
+ regulator_disable(nmk_i2c->regulator);
+ return 0;
+}
+
+static int nmk_i2c_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev);
+
+ if (nmk_i2c->regulator)
+ regulator_enable(nmk_i2c->regulator);
+ clk_enable(nmk_i2c->clk);
+ return 0;
+}
+
/*
* We use noirq so that we suspend late and resume before the wakeup interrupt
* to ensure that we do the !pm_runtime_suspended() check in resume before
* there has been a regular pm runtime resume (via pm_runtime_get_sync()).
*/
static const struct dev_pm_ops nmk_i2c_pm = {
- .suspend_noirq = nmk_i2c_suspend,
- .resume_noirq = nmk_i2c_resume,
+ SET_RUNTIME_PM_OPS(nmk_i2c_runtime_suspend, nmk_i2c_runtime_resume,
+ NULL)
+ .suspend_noirq = nmk_i2c_suspend_noirq,
};
static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap)
@@ -1047,6 +1071,7 @@ static struct platform_driver nmk_i2c_driver = {
},
.probe = nmk_i2c_probe,
.remove = __devexit_p(nmk_i2c_remove),
+ .suspend = nmk_i2c_suspend,
};
static int __init nmk_i2c_init(void)
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index f354813a13e..8aba6cc2bac 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -151,6 +151,16 @@ config KEYBOARD_BFIN
To compile this driver as a module, choose M here: the
module will be called bf54x-keys.
+config KEYBOARD_DB5500
+ tristate "DB5500 keyboard"
+ depends on UX500_SOC_DB5500
+ help
+ Say Y here to enable the on-chip keypad controller on the
+ ST-Ericsson U5500 platform.
+
+ To compile this driver as a module, choose M here: the
+ module will be called db5500_keypad.
+
config KEYBOARD_LKKBD
tristate "DECstation/VAXstation LK201/LK401 keyboard"
select SERIO
@@ -381,7 +391,7 @@ config KEYBOARD_NEWTON
To compile this driver as a module, choose M here: the
module will be called newtonkbd.
-config KEYBOARD_NOMADIK
+config KEYBOARD_NOMADIK_SKE
tristate "ST-Ericsson Nomadik SKE keyboard"
depends on PLAT_NOMADIK
help
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index df7061f1291..90a01405e51 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
+obj-$(CONFIG_KEYBOARD_DB5500) += db5500_keypad.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
@@ -31,7 +32,7 @@ obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o
obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o
obj-$(CONFIG_KEYBOARD_MPR121) += mpr121_touchkey.o
obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
-obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o
+obj-$(CONFIG_KEYBOARD_NOMADIK_SKE) += nomadik-ske-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o
obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
diff --git a/drivers/input/keyboard/db5500_keypad.c b/drivers/input/keyboard/db5500_keypad.c
new file mode 100644
index 00000000000..729775d99e8
--- /dev/null
+++ b/drivers/input/keyboard/db5500_keypad.c
@@ -0,0 +1,799 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License, version 2
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <mach/db5500-keypad.h>
+#include <linux/regulator/consumer.h>
+
+#define KEYPAD_CTR 0x0
+#define KEYPAD_IRQ_CLEAR 0x4
+#define KEYPAD_INT_ENABLE 0x8
+#define KEYPAD_INT_STATUS 0xC
+#define KEYPAD_ARRAY_01 0x18
+
+#define KEYPAD_NUM_ARRAY_REGS 5
+
+#define KEYPAD_CTR_WRITE_IRQ_ENABLE (1 << 10)
+#define KEYPAD_CTR_WRITE_CONTROL (1 << 8)
+#define KEYPAD_CTR_SCAN_ENABLE (1 << 7)
+
+#define KEYPAD_ARRAY_CHANGEBIT (1 << 15)
+
+#define KEYPAD_DEBOUNCE_PERIOD_MIN 5 /* ms */
+#define KEYPAD_DEBOUNCE_PERIOD_MAX 80 /* ms */
+
+#define KEYPAD_GND_ROW 8
+
+#define KEYPAD_ROW_SHIFT 3
+#define KEYPAD_KEYMAP_SIZE \
+ (KEYPAD_MAX_ROWS * KEYPAD_MAX_COLS)
+
+#define KEY_PRESSED_DELAY 10
+
+/**
+ * struct db5500_keypad - data structure used by keypad driver
+ * @irq: irq number
+ * @base: keypad registers base address
+ * @input: pointer to input device object
+ * @board: keypad platform data
+ * @keymap: matrix scan code table for keycodes
+ * @clk: clock structure pointer
+ * @regulator: regulator used by keypad
+ * @switch_work: delayed work variable for switching to gpio mode
+ * @gpio_work: delayed work variable for reporting key events in gpio mode
+ * @previous_set: previous set of array registers
+ * @enable: true while the controller scan mode is active
+ * @enable_on_resume: set if keypad should be enabled on resume
+ * @valid_key: holds the state of the last valid key press
+ * @db5500_rows: row gpio array for the db5500 keypad
+ * @db5500_cols: column gpio array for the db5500 keypad
+ * @gpio_input_irq: array of gpio irqs, one per row
+ * @gpio_row: row of the key detected in gpio mode
+ * @gpio_col: column of the key detected in gpio mode
+ */
+struct db5500_keypad {
+ int irq;
+ void __iomem *base;
+ struct input_dev *input;
+ const struct db5500_keypad_platform_data *board;
+ unsigned short keymap[KEYPAD_KEYMAP_SIZE];
+ struct clk *clk;
+ struct regulator *regulator;
+ struct delayed_work switch_work;
+ struct delayed_work gpio_work;
+ u8 previous_set[KEYPAD_MAX_ROWS];
+ bool enable;
+ bool enable_on_resume;
+ bool valid_key;
+ int db5500_rows[KEYPAD_MAX_ROWS];
+ int db5500_cols[KEYPAD_MAX_COLS];
+ int gpio_input_irq[KEYPAD_MAX_ROWS];
+ int gpio_row;
+ int gpio_col;
+};
+
+/**
+ * db5500_keypad_report() - reports the keypad event
+ * @keypad: pointer to device structure
+ * @row: row value of keypad
+ * @curr: current event
+ * @previous: previous event
+ *
+ * Reports keypad events to the input subsystem.
+ *
+ * By default all column reads are 1111 1111b. Any press will pull the column
+ * down, leading to a 0 in any of these locations. We invert these values so
+ * that a 1 means "column pressed".
+ * If a bit changes from 0 in previous to 1 in curr, we report a key press;
+ * if it changes from 1 to 0, we report a key release.
+ */
+static void db5500_keypad_report(struct db5500_keypad *keypad, int row,
+ u8 curr, u8 previous)
+{
+ struct input_dev *input = keypad->input;
+ u8 changed = curr ^ previous;
+
+ while (changed) {
+ int col = __ffs(changed);
+ bool press = curr & BIT(col);
+ int code = MATRIX_SCAN_CODE(row, col, KEYPAD_ROW_SHIFT);
+
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code], press);
+ input_sync(input);
+
+ changed &= ~BIT(col);
+ }
+}
+
+static void db5500_keypad_scan(struct db5500_keypad *keypad)
+{
+ u8 current_set[ARRAY_SIZE(keypad->previous_set)];
+ int tries = 100;
+ bool changebit;
+ u32 data_reg;
+ u8 allrows;
+ u8 common;
+ int i;
+
+ writel(0x1, keypad->base + KEYPAD_IRQ_CLEAR);
+
+again:
+ if (!tries--) {
+ dev_warn(&keypad->input->dev, "values failed to stabilize\n");
+ return;
+ }
+
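+ /*
+ * The change bit must read the same in every ARRAY register; if it
+ * flips mid-read the hardware was updating the scan data and the
+ * whole set must be re-read.
+ */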
+ changebit = readl(keypad->base + KEYPAD_ARRAY_01)
+ & KEYPAD_ARRAY_CHANGEBIT;
+
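+ /* Each ARRAY register carries two rows: bits 7:0 and bits 23:16. */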
+ for (i = 0; i < KEYPAD_NUM_ARRAY_REGS; i++) {
+ data_reg = readl(keypad->base + KEYPAD_ARRAY_01 + 4 * i);
+
+ /* If the change bit changed, we need to reread the data */
+ if (changebit != !!(data_reg & KEYPAD_ARRAY_CHANGEBIT))
+ goto again;
+
+ current_set[2 * i] = ~(data_reg & 0xff);
+
+ /* Last array reg has only one valid set of columns */
+ if (i != KEYPAD_NUM_ARRAY_REGS - 1)
+ current_set[2 * i + 1] = ~((data_reg & 0xff0000) >> 16);
+ }
+
+ allrows = current_set[KEYPAD_GND_ROW];
+
+ /*
+ * Sometimes during a GND row release, an incorrect report is received
+ * where the ARRAY8 all rows setting does not match the other ARRAY*
+ * rows. Ignore this report; the correct one has been observed to
+ * follow it.
+ */
+ common = 0xff;
+ for (i = 0; i < KEYPAD_GND_ROW; i++)
+ common &= current_set[i];
+
+ if ((allrows & common) != common)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(current_set); i++) {
+ /*
+ * If there is an allrows press (GND row), we need to ignore
+ * the allrows values from the rest of the ARRAYs.
+ */
+ if (i < KEYPAD_GND_ROW && allrows)
+ current_set[i] &= ~allrows;
+
+ if (keypad->previous_set[i] == current_set[i])
+ continue;
+
+ db5500_keypad_report(keypad, i, current_set[i],
+ keypad->previous_set[i]);
+ }
+
+ /* update the reference set of array registers */
+ memcpy(keypad->previous_set, current_set, sizeof(keypad->previous_set));
+
+ return;
+}
+
+/**
+ * db5500_keypad_writel() - write into keypad registers
+ * @keypad: pointer to device structure
+ * @val: value to write into register
+ * @reg: register offset
+ *
+ * Waits until the hardware allows a write to the given register, then
+ * writes the value.
+ */
+static void db5500_keypad_writel(struct db5500_keypad *keypad, u32 val, u32 reg)
+{
+ int timeout = 4;
+ int allowedbit;
+
+ switch (reg) {
+ case KEYPAD_CTR:
+ allowedbit = KEYPAD_CTR_WRITE_CONTROL;
+ break;
+ case KEYPAD_INT_ENABLE:
+ allowedbit = KEYPAD_CTR_WRITE_IRQ_ENABLE;
+ break;
+ default:
+ BUG();
+ }
+
+ do {
+ u32 ctr = readl(keypad->base + KEYPAD_CTR);
+
+ if (ctr & allowedbit)
+ break;
+
+ udelay(50);
+ } while (--timeout);
+
+ /* Five 32k clk cycles (~150us) required, we waited 200us */
+ WARN_ON(!timeout);
+
+ writel(val, keypad->base + reg);
+}
+
+/**
+ * db5500_keypad_chip_init() - initialize the keypad chip
+ * @keypad: pointer to device structure
+ *
+ * Initializes the keypad controller: programs the debounce settings,
+ * enables scanning and enables the keypad interrupt.
+ */
+static int db5500_keypad_chip_init(struct db5500_keypad *keypad)
+{
+ int debounce = keypad->board->debounce_ms;
+ int debounce_hits = 0;
+
+ if (debounce < KEYPAD_DEBOUNCE_PERIOD_MIN)
+ debounce = KEYPAD_DEBOUNCE_PERIOD_MIN;
+
+ if (debounce > KEYPAD_DEBOUNCE_PERIOD_MAX) {
+ debounce_hits = DIV_ROUND_UP(debounce,
+ KEYPAD_DEBOUNCE_PERIOD_MAX) - 1;
+ debounce = KEYPAD_DEBOUNCE_PERIOD_MAX;
+ }
+
+ /* Convert the milliseconds to the bit mask */
+ debounce = DIV_ROUND_UP(debounce, KEYPAD_DEBOUNCE_PERIOD_MIN) - 1;
+
+ clk_enable(keypad->clk);
+
+ db5500_keypad_writel(keypad,
+ KEYPAD_CTR_SCAN_ENABLE
+ | ((debounce_hits & 0x7) << 4)
+ | debounce,
+ KEYPAD_CTR);
+
+ db5500_keypad_writel(keypad, 0x1, KEYPAD_INT_ENABLE);
+
+ return 0;
+}
+
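+/*
+ * Switch between controller scan mode and low power GPIO mode. In GPIO
+ * mode the keypad clock and regulator are released and a falling edge on
+ * any row GPIO brings the driver back into scan mode.
+ */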
+static void db5500_mode_enable(struct db5500_keypad *keypad, bool enable)
+{
+ int i;
+
+ if (!enable) {
+ db5500_keypad_writel(keypad, 0, KEYPAD_CTR);
+ db5500_keypad_writel(keypad, 0, KEYPAD_INT_ENABLE);
+ if (keypad->board->exit)
+ keypad->board->exit();
+ for (i = 0; i < keypad->board->krow; i++) {
+ enable_irq(keypad->gpio_input_irq[i]);
+ enable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+ } else {
+ regulator_enable(keypad->regulator);
+ clk_enable(keypad->clk);
+ for (i = 0; i < keypad->board->krow; i++) {
+ disable_irq_nosync(keypad->gpio_input_irq[i]);
+ disable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ if (keypad->board->init)
+ keypad->board->init();
+ db5500_keypad_chip_init(keypad);
+ }
+}
+
+static void db5500_gpio_switch_work(struct work_struct *work)
+{
+ struct db5500_keypad *keypad = container_of(work,
+ struct db5500_keypad, switch_work.work);
+
+ db5500_mode_enable(keypad, false);
+ keypad->enable = false;
+}
+
+static void db5500_gpio_release_work(struct work_struct *work)
+{
+ int code;
+ struct db5500_keypad *keypad = container_of(work,
+ struct db5500_keypad, gpio_work.work);
+ struct input_dev *input = keypad->input;
+
+ code = MATRIX_SCAN_CODE(keypad->gpio_col, keypad->gpio_row,
+ KEYPAD_ROW_SHIFT);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code], 1);
+ input_sync(input);
+ input_report_key(input, keypad->keymap[code], 0);
+ input_sync(input);
+}
+
+static int db5500_read_get_gpio_row(struct db5500_keypad *keypad)
+{
+ int row;
+ int value = 0;
+ int ret;
+
+ /* read all rows GPIO data register values */
+ for (row = 0; row < keypad->board->krow; row++) {
+ ret = gpio_get_value(keypad->db5500_rows[row]);
+ value += (1 << row) * ret;
+ }
+
+ /* get the exact row */
+ for (row = 0; row < keypad->board->krow; row++) {
+ if (((1 << row) & value) == 0)
+ return row;
+ }
+
+ return -1;
+}
+
+static void db5500_set_cols(struct db5500_keypad *keypad, int col)
+{
+ int i, ret;
+ int value;
+
+ /*
+ * Set all columns except the requested column
+ * output pin as high
+ */
+ for (i = 0; i < keypad->board->kcol; i++) {
+ if (i == col)
+ value = 0;
+ else
+ value = 1;
+ ret = gpio_request(keypad->db5500_cols[i], "db5500-kpd");
+
+ if (ret < 0) {
+ pr_err("db5500_set_cols: gpio request failed\n");
+ continue;
+ }
+
+ gpio_direction_output(keypad->db5500_cols[i], value);
+ gpio_free(keypad->db5500_cols[i]);
+ }
+}
+
+static void db5500_free_cols(struct db5500_keypad *keypad)
+{
+ int i, ret;
+
+ for (i = 0; i < keypad->board->kcol; i++) {
+ ret = gpio_request(keypad->db5500_cols[i], "db5500-kpd");
+
+ if (ret < 0) {
+ pr_err("db5500_free_cols: gpio request failed\n");
+ continue;
+ }
+
+ gpio_direction_output(keypad->db5500_cols[i], 0);
+ gpio_free(keypad->db5500_cols[i]);
+ }
+}
+
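+/*
+ * Walk the columns, driving only the current column low; a row that then
+ * reads back low identifies the pressed key as (gpio_row, gpio_col).
+ */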
+static void db5500_manual_scan(struct db5500_keypad *keypad)
+{
+ int row;
+ int col;
+
+ keypad->valid_key = false;
+
+ for (col = 0; col < keypad->board->kcol; col++) {
+ db5500_set_cols(keypad, col);
+ row = db5500_read_get_gpio_row(keypad);
+ if (row >= 0) {
+ keypad->valid_key = true;
+ keypad->gpio_row = row;
+ keypad->gpio_col = col;
+ break;
+ }
+ }
+ db5500_free_cols(keypad);
+}
+
+static irqreturn_t db5500_keypad_gpio_irq(int irq, void *dev_id)
+{
+ struct db5500_keypad *keypad = dev_id;
+
+ if (!gpio_get_value(IRQ_TO_GPIO(irq))) {
+ db5500_manual_scan(keypad);
+ if (!keypad->enable) {
+ keypad->enable = true;
+ db5500_mode_enable(keypad, true);
+ }
+
+ /*
+ * Schedule the delayed GPIO work to report the key in case
+ * the keypad controller, now re-enabled, does not detect it;
+ * the keypad irq handler cancels this work when it does.
+ */
+ if (keypad->valid_key) {
+ schedule_delayed_work(&keypad->gpio_work,
+ KEY_PRESSED_DELAY);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t db5500_keypad_irq(int irq, void *dev_id)
+{
+ struct db5500_keypad *keypad = dev_id;
+
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->switch_work);
+ db5500_keypad_scan(keypad);
+
+ /*
+ * Schedule the switch back to GPIO mode; every keypad interrupt
+ * pushes the deadline forward, so the switch happens only after
+ * a period of inactivity in keypad mode.
+ */
+ if (keypad->enable)
+ schedule_delayed_work(&keypad->switch_work,
+ keypad->board->switch_delay);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * db5500_keypad_probe() - initialize the keypad driver
+ * @pdev: pointer to platform device structure
+ *
+ * Allocates and initializes the instance data, requests the irqs and
+ * registers the device with the input subsystem.
+ */
+static int __devinit db5500_keypad_probe(struct platform_device *pdev)
+{
+ struct db5500_keypad_platform_data *plat;
+ struct db5500_keypad *keypad;
+ struct resource *res;
+ struct input_dev *input;
+ void __iomem *base;
+ struct clk *clk;
+ int ret;
+ int irq;
+ int i;
+
+ plat = pdev->dev.platform_data;
+ if (!plat) {
+ dev_err(&pdev->dev, "invalid keypad platform data\n");
+ ret = -EINVAL;
+ goto out_ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get keypad irq\n");
+ ret = -EINVAL;
+ goto out_ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "missing platform resources\n");
+ ret = -EINVAL;
+ goto out_ret;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ ret = -EBUSY;
+ goto out_ret;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto out_freerequest_memregions;
+ }
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to clk_get\n");
+ ret = PTR_ERR(clk);
+ goto out_iounmap;
+ }
+
+ keypad = kzalloc(sizeof(struct db5500_keypad), GFP_KERNEL);
+ if (!keypad) {
+ dev_err(&pdev->dev, "failed to allocate keypad memory\n");
+ ret = -ENOMEM;
+ goto out_freeclk;
+ }
+
+ input = input_allocate_device();
+ if (!input) {
+ dev_err(&pdev->dev, "failed to input_allocate_device\n");
+ ret = -ENOMEM;
+ goto out_freekeypad;
+ }
+
+ keypad->regulator = regulator_get(&pdev->dev, "v-ape");
+ if (IS_ERR(keypad->regulator)) {
+ dev_err(&pdev->dev, "regulator_get failed\n");
+ keypad->regulator = NULL;
+ ret = -EINVAL;
+ goto out_regulator_get;
+ } else {
+ ret = regulator_enable(keypad->regulator);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "regulator_enable failed\n");
+ goto out_regulator_enable;
+ }
+ }
+
+ input->id.bustype = BUS_HOST;
+ input->name = "db5500-keypad";
+ input->dev.parent = &pdev->dev;
+
+ input->keycode = keypad->keymap;
+ input->keycodesize = sizeof(keypad->keymap[0]);
+ input->keycodemax = ARRAY_SIZE(keypad->keymap);
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+
+ __set_bit(EV_KEY, input->evbit);
+ if (!plat->no_autorepeat)
+ __set_bit(EV_REP, input->evbit);
+
+ matrix_keypad_build_keymap(plat->keymap_data, KEYPAD_ROW_SHIFT,
+ input->keycode, input->keybit);
+
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to register input device: %d\n", ret);
+ goto out_freeinput;
+ }
+
+ keypad->irq = irq;
+ keypad->board = plat;
+ keypad->input = input;
+ keypad->base = base;
+ keypad->clk = clk;
+
+ INIT_DELAYED_WORK(&keypad->switch_work, db5500_gpio_switch_work);
+ INIT_DELAYED_WORK(&keypad->gpio_work, db5500_gpio_release_work);
+
+ clk_enable(keypad->clk);
+ if (!keypad->board->init) {
+ dev_err(&pdev->dev, "init function not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (keypad->board->init() < 0) {
+ dev_err(&pdev->dev, "keyboard init config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (!keypad->board->exit) {
+ dev_err(&pdev->dev, "exit funtion not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (keypad->board->exit() < 0) {
+ dev_err(&pdev->dev, "keyboard exit config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ for (i = 0; i < keypad->board->krow; i++) {
+ keypad->db5500_rows[i] = plat->gpio_input_pins[i];
+ keypad->gpio_input_irq[i] =
+ GPIO_TO_IRQ(keypad->db5500_rows[i]);
+ }
+
+ for (i = 0; i < keypad->board->kcol; i++)
+ keypad->db5500_cols[i] = plat->gpio_output_pins[i];
+
+ for (i = 0; i < keypad->board->krow; i++) {
+ ret = request_threaded_irq(keypad->gpio_input_irq[i],
+ NULL, db5500_keypad_gpio_irq,
+ IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND,
+ "db5500-keypad-gpio", keypad);
+ if (ret) {
+ dev_err(&pdev->dev, "allocate gpio irq %d failed\n",
+ keypad->gpio_input_irq[i]);
+ goto out_unregisterinput;
+ }
+ enable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+
+ ret = request_threaded_irq(keypad->irq, NULL, db5500_keypad_irq,
+ IRQF_ONESHOT, "db5500-keypad", keypad);
+ if (ret) {
+ dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
+ goto out_unregisterinput;
+ }
+
+ platform_set_drvdata(pdev, keypad);
+
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+ return 0;
+
+out_unregisterinput:
+ input_unregister_device(input);
+ input = NULL;
+ clk_disable(keypad->clk);
+out_freeinput:
+ input_free_device(input);
+ input = NULL;
+out_regulator_enable:
+ regulator_put(keypad->regulator);
+out_regulator_get:
+ input_free_device(input);
+out_freekeypad:
+ kfree(keypad);
+out_freeclk:
+ clk_put(clk);
+out_iounmap:
+ iounmap(base);
+out_freerequest_memregions:
+ release_mem_region(res->start, resource_size(res));
+out_ret:
+ return ret;
+}
+
+/**
+ * db5500_keypad_remove() - Removes the keypad driver
+ * @pdev: pointer to platform device structure
+ *
+ * Cancels pending work, frees the irqs and releases all resources
+ * held by the keypad driver.
+ */
+static int __devexit db5500_keypad_remove(struct platform_device *pdev)
+{
+ struct db5500_keypad *keypad = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int i;
+
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->switch_work);
+ for (i = 0; i < keypad->board->krow; i++)
+ free_irq(keypad->gpio_input_irq[i], keypad);
+ free_irq(keypad->irq, keypad);
+ input_unregister_device(keypad->input);
+
+ clk_disable(keypad->clk);
+ clk_put(keypad->clk);
+
+ if (keypad->board->exit)
+ keypad->board->exit();
+
+ regulator_put(keypad->regulator);
+
+ iounmap(keypad->base);
+
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(keypad);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/**
+ * db5500_keypad_suspend() - suspend the keypad controller
+ * @dev: pointer to device structure
+ *
+ * Suspends the keypad controller. If the device may wake the system,
+ * the keypad irq is armed as a wakeup source; otherwise the keypad
+ * is disabled.
+ */
+static int db5500_keypad_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct db5500_keypad *keypad = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(irq);
+ else {
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->switch_work);
+ disable_irq(irq);
+ keypad->enable_on_resume = keypad->enable;
+ if (keypad->enable) {
+ db5500_mode_enable(keypad, false);
+ keypad->enable = false;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * db5500_keypad_resume() - resume the keypad controller
+ * @dev: pointer to device structure
+ *
+ * This function resumes the keypad
+ * controller. Returns 0 on success.
+ */
+static int db5500_keypad_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct db5500_keypad *keypad = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(irq);
+ else {
+ if (keypad->enable_on_resume && !keypad->enable) {
+ keypad->enable = true;
+ db5500_mode_enable(keypad, true);
+ /*
+ * Schedule the work queue to change it to GPIO mode
+ * if there is no activity in keypad mode
+ */
+ schedule_delayed_work(&keypad->switch_work,
+ keypad->board->switch_delay);
+ }
+ enable_irq(irq);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops db5500_keypad_dev_pm_ops = {
+ .suspend = db5500_keypad_suspend,
+ .resume = db5500_keypad_resume,
+};
+#endif
+
+static struct platform_driver db5500_keypad_driver = {
+ .driver = {
+ .name = "db5500-keypad",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &db5500_keypad_dev_pm_ops,
+#endif
+ },
+ .probe = db5500_keypad_probe,
+ .remove = __devexit_p(db5500_keypad_remove),
+};
+
+/**
+ * db5500_keypad_init() - Initialize the keypad driver
+ *
+ * This function registers the db5500 keypad
+ * driver. Returns 0 on success.
+ */
+static int __init db5500_keypad_init(void)
+{
+ return platform_driver_register(&db5500_keypad_driver);
+}
+module_init(db5500_keypad_init);
+
+/**
+ * db5500_keypad_exit() - De-initialize the keypad driver
+ *
+ * This function unregisters the db5500
+ * keypad driver.
+ */
+static void __exit db5500_keypad_exit(void)
+{
+ platform_driver_unregister(&db5500_keypad_driver);
+}
+module_exit(db5500_keypad_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_DESCRIPTION("DB5500 Keypad Driver");
+MODULE_ALIAS("platform:db5500-keypad");
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 62bfce468f9..1fdf54bb4f6 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -29,6 +29,7 @@
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
struct gpio_button_data {
const struct gpio_keys_button *button;
@@ -46,6 +47,8 @@ struct gpio_keys_drvdata {
struct input_dev *input;
struct mutex disable_lock;
unsigned int n_buttons;
+ bool enabled;
+ bool enable_after_suspend;
int (*enable)(struct device *dev);
void (*disable)(struct device *dev);
struct gpio_button_data data[0];
@@ -524,6 +527,8 @@ static int gpio_keys_open(struct input_dev *input)
{
struct gpio_keys_drvdata *ddata = input_get_drvdata(input);
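+ /* keep the parent device powered while the input device is open */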
+ pm_runtime_get_sync(input->dev.parent);
+ ddata->enabled = true;
return ddata->enable ? ddata->enable(input->dev.parent) : 0;
}
@@ -533,6 +538,8 @@ static void gpio_keys_close(struct input_dev *input)
if (ddata->disable)
ddata->disable(input->dev.parent);
+ ddata->enabled = false;
+ pm_runtime_put(input->dev.parent);
}
/*
@@ -675,6 +682,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
ddata->n_buttons = pdata->nbuttons;
ddata->enable = pdata->enable;
ddata->disable = pdata->disable;
+ ddata->enabled = false;
mutex_init(&ddata->disable_lock);
platform_set_drvdata(pdev, ddata);
@@ -691,6 +699,8 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
input->id.product = 0x0001;
input->id.version = 0x0100;
+ pm_runtime_enable(&pdev->dev);
+
/* Enable auto repeat feature of Linux input subsystem */
if (pdata->rep)
__set_bit(EV_REP, input->evbit);
@@ -756,6 +766,8 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
struct input_dev *input = ddata->input;
int i;
+ pm_runtime_disable(&pdev->dev);
+
sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group);
device_init_wakeup(&pdev->dev, 0);
@@ -790,6 +802,10 @@ static int gpio_keys_suspend(struct device *dev)
if (bdata->button->wakeup)
enable_irq_wake(bdata->irq);
}
+ } else {
+ ddata->enable_after_suspend = ddata->enabled;
+ if (ddata->enabled && ddata->disable)
+ ddata->disable(dev);
}
return 0;
@@ -808,6 +824,11 @@ static int gpio_keys_resume(struct device *dev)
if (gpio_is_valid(bdata->button->gpio))
gpio_keys_gpio_report_event(bdata);
}
+
+ if (!device_may_wakeup(dev) && ddata->enable_after_suspend
+ && ddata->enable)
+ ddata->enable(dev);
+
input_sync(ddata->input);
return 0;
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index 101e245944e..6a0707f7f76 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -2,7 +2,7 @@
* Copyright (C) ST-Ericsson SA 2010
*
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Co-author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
*
* License terms:GNU General Public License (GPL) version 2
*
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -19,8 +20,10 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <plat/ske.h>
+#include <linux/gpio/nomadik.h>
/* SKE_CR bits */
#define SKE_KPMLT (0x1 << 6)
@@ -48,17 +51,38 @@
#define SKE_ASR3 0x2C
#define SKE_NUM_ASRX_REGISTERS (4)
+#define KEY_PRESSED_DELAY 10
+
+
+#define KEY_REPORTED 1
+#define KEY_PRESSED 2
/**
* struct ske_keypad - data structure used by keypad driver
- * @irq: irq no
- * @reg_base: ske regsiters base address
- * @input: pointer to input device object
- * @board: keypad platform device
- * @keymap: matrix scan code table for keycodes
- * @clk: clock structure pointer
+ * @dev: pointer to the device structure
+ * @irq: irq no
+ * @reg_base: ske registers base address
+ * @input: pointer to input device object
+ * @board: keypad platform device
+ * @keymap: matrix scan code table for keycodes
+ * @clk: clock structure pointer
+ * @ske_keypad_lock: lock used while writing into registers
+ * @enable: flag to enable the driver event
+ * @enable_on_resume: set if keypad should be enabled on resume
+ * @regulator: pointer to the regulator used for the ske keypad
+ * @gpio_input_irq: array for gpio irqs
+ * @key_pressed: hold the key state
+ * @work: delayed work variable for gpio switch
+ * @ske_rows: rows gpio array for ske
+ * @ske_cols: columns gpio array for ske
+ * @gpio_row: gpio row
+ * @gpio_col: gpio column
+ * @gpio_work: delayed work variable for release gpio key
+ * @keys: matrix holding key status
+ * @scan_work: delayed work for scanning new key actions
*/
struct ske_keypad {
+ struct device *dev;
int irq;
void __iomem *reg_base;
struct input_dev *input;
@@ -66,6 +90,19 @@ struct ske_keypad {
unsigned short keymap[SKE_KPD_KEYMAP_SIZE];
struct clk *clk;
spinlock_t ske_keypad_lock;
+ bool enable;
+ bool enable_on_resume;
+ struct regulator *regulator;
+ int *gpio_input_irq;
+ int key_pressed;
+ struct delayed_work work;
+ int *ske_rows;
+ int *ske_cols;
+ int gpio_row;
+ int gpio_col;
+ struct delayed_work gpio_work;
+ u8 **keys;
+ struct delayed_work scan_work;
};
static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr,
@@ -83,15 +120,15 @@ static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr,
spin_unlock(&keypad->ske_keypad_lock);
}
-/*
+/**
* ske_keypad_chip_init: init keypad controller configuration
- *
+ * @keypad: pointer to the ske_keypad structure
* Enable Multi key press detection, auto scan mode
*/
static int __init ske_keypad_chip_init(struct ske_keypad *keypad)
{
u32 value;
- int timeout = 50;
+ int timeout = keypad->board->debounce_ms;
/* check SKE_RIS to be 0 */
while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--)
@@ -100,7 +137,7 @@ static int __init ske_keypad_chip_init(struct ske_keypad *keypad)
if (!timeout)
return -EINVAL;
- /*
+ /**
* set debounce value
* keypad dbounce is configured in DBCR[15:8]
* dbounce value in steps of 32/32.768 ms
@@ -115,7 +152,7 @@ static int __init ske_keypad_chip_init(struct ske_keypad *keypad)
/* enable multi key detection */
ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPMLT);
- /*
+ /**
* set up the number of columns
* KPCN[5:3] defines no. of keypad columns to be auto scanned
*/
@@ -134,14 +171,125 @@ static int __init ske_keypad_chip_init(struct ske_keypad *keypad)
return 0;
}
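+/*
+ * ske_mode_enable() - switch between SKE scan mode and GPIO wakeup mode.
+ * Disabling powers the SKE block down and arms the row GPIOs as wakeup
+ * interrupts; enabling masks the GPIO interrupts and reinitializes the
+ * SKE controller.
+ */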
+static void ske_mode_enable(struct ske_keypad *keypad, bool enable)
+{
+ int i;
+
+ if (!enable) {
+ dev_dbg(keypad->dev, "%s disable keypad\n", __func__);
+ writel(0, keypad->reg_base + SKE_CR);
+ if (keypad->board->exit)
+ keypad->board->exit();
+ for (i = 0; i < keypad->board->kconnected_rows; i++) {
+ enable_irq(keypad->gpio_input_irq[i]);
+ enable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+ } else {
+ dev_dbg(keypad->dev, "%s enable keypad\n", __func__);
+ regulator_enable(keypad->regulator);
+ clk_enable(keypad->clk);
+ for (i = 0; i < keypad->board->kconnected_rows; i++) {
+ disable_irq_nosync(keypad->gpio_input_irq[i]);
+ disable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ if (keypad->board->init)
+ keypad->board->init();
+ ske_keypad_chip_init(keypad);
+ }
+}
+static void ske_enable(struct ske_keypad *keypad, bool enable)
+{
+ keypad->enable = enable;
+ if (keypad->enable) {
+ enable_irq(keypad->irq);
+ ske_mode_enable(keypad, true);
+ } else {
+ ske_mode_enable(keypad, false);
+ disable_irq(keypad->irq);
+ }
+}
+
+static ssize_t ske_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ske_keypad *keypad = platform_get_drvdata(pdev);
+ return sprintf(buf, "%d\n", keypad->enable);
+}
+
+static ssize_t ske_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ske_keypad *keypad = platform_get_drvdata(pdev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (keypad->enable != val) {
+ keypad->enable = val ? true : false;
+ ske_enable(keypad, keypad->enable);
+ }
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ ske_show_attr_enable, ske_store_attr_enable);
+
+static struct attribute *ske_keypad_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group ske_attr_group = {
+ .attrs = ske_keypad_attrs,
+};
+
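+/*
+ * ske_keypad_report() - mark the rows set in @status for column @col as
+ * pressed; the corresponding input events are emitted later from the
+ * scan work.
+ */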
+static void ske_keypad_report(struct ske_keypad *keypad, u8 status, int col)
+{
+ int row = 0, code, pos;
+ u32 ske_ris;
+ int num_of_rows;
+
+ /* find out the row */
+ num_of_rows = hweight8(status);
+ do {
+ pos = __ffs(status);
+ row = pos;
+ status &= ~(1 << pos);
+
+ if (row >= keypad->board->krow)
+ /* no more rows supported by this keypad */
+ break;
+
+ code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT);
+ ske_ris = readl(keypad->reg_base + SKE_RIS);
+ keypad->key_pressed = ske_ris & SKE_KPRISA;
+
+ dev_dbg(keypad->dev,
+ "%s key_pressed:%d code:%d row:%d col:%d\n",
+ __func__, keypad->key_pressed, code, row, col);
+
+ if (keypad->key_pressed)
+ keypad->keys[row][col] |= KEY_PRESSED;
+
+ num_of_rows--;
+ } while (num_of_rows);
+}
+
static void ske_keypad_read_data(struct ske_keypad *keypad)
{
- struct input_dev *input = keypad->input;
- u16 status;
- int col = 0, row = 0, code;
- int ske_asr, ske_ris, key_pressed, i;
+ u8 status;
+ int col = 0;
+ int ske_asr, i;
- /*
+ /**
* Read the auto scan registers
*
* Each SKE_ASRx (x=0 to x=3) contains two row values.
@@ -153,59 +301,274 @@ static void ske_keypad_read_data(struct ske_keypad *keypad)
if (!ske_asr)
continue;
- /* now that ASRx is zero, find out the column x and row y*/
- if (ske_asr & 0xff) {
+ /* now that ASRx is non-zero, find out the column x and row y */
+ status = ske_asr & 0xff;
+ if (status) {
col = i * 2;
- status = ske_asr & 0xff;
- } else {
+ if (col >= keypad->board->kcol)
+ /* no more columns supported by this keypad */
+ break;
+ ske_keypad_report(keypad, status, col);
+ }
+ status = (ske_asr & 0xff00) >> 8;
+ if (status) {
col = (i * 2) + 1;
- status = (ske_asr & 0xff00) >> 8;
+ if (col >= keypad->board->kcol)
+ /* no more columns supported by this keypad */
+ break;
+ ske_keypad_report(keypad, status, col);
}
+ }
+}
- /* find out the row */
- row = __ffs(status);
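+/*
+ * ske_keypad_scan_work() - poll the autoscan result and translate the
+ * per-key KEY_PRESSED/KEY_REPORTED state into press/release events.
+ */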
+static void ske_keypad_scan_work(struct work_struct *work)
+{
+ int timeout = 10;
+ int i, j, code;
+ struct ske_keypad *keypad = container_of(work,
+ struct ske_keypad, scan_work.work);
+ struct input_dev *input = keypad->input;
- code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT);
- ske_ris = readl(keypad->reg_base + SKE_RIS);
- key_pressed = ske_ris & SKE_KPRISA;
+ /* Wait for autoscan to complete */
+ while (readl(keypad->reg_base + SKE_CR) & SKE_KPASON)
+ cpu_relax();
- input_event(input, EV_MSC, MSC_SCAN, code);
- input_report_key(input, keypad->keymap[code], key_pressed);
- input_sync(input);
+ /* SKEx registers are stable and can be read */
+ ske_keypad_read_data(keypad);
+
+ /* Check for key actions */
+ for (i = 0; i < keypad->board->krow; i++) {
+ for (j = 0; j < keypad->board->kcol; j++) {
+ switch (keypad->keys[i][j]) {
+ case KEY_REPORTED:
+ /*
+ * Key was reported but is no longer pressed,
+ * report it as released.
+ */
+ code = MATRIX_SCAN_CODE(i, j,
+ SKE_KEYPAD_ROW_SHIFT);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code],
+ 0);
+ input_sync(input);
+ keypad->keys[i][j] = 0;
+ dev_dbg(keypad->dev,
+ "%s Key release reported, code:%d "
+ "(key %d)\n",
+ __func__, code, keypad->keymap[code]);
+ break;
+ case KEY_PRESSED:
+ /* Key pressed but not yet reported, report */
+ code = MATRIX_SCAN_CODE(i, j,
+ SKE_KEYPAD_ROW_SHIFT);
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code],
+ 1);
+ input_sync(input);
+ dev_dbg(keypad->dev,
+ "%s Key press reported, code:%d "
+ "(key %d)\n",
+ __func__, code, keypad->keymap[code]);
+ /* Intentional fall through */
+ case (KEY_REPORTED | KEY_PRESSED):
+ /*
+ * Key pressed and reported, just reset
+ * KEY_PRESSED for next scan
+ */
+ keypad->keys[i][j] = KEY_REPORTED;
+ break;
+ }
+ }
+ }
+
+ if (keypad->key_pressed) {
+ /*
+ * Key still pressed; schedule work to poll for changes in
+ * 100 ms. After increasing the delay from 50 ms to 100 ms,
+ * the average CPU load is 2% to 3%.
+ */
+ schedule_delayed_work(&keypad->scan_work,
+ msecs_to_jiffies(100));
+ } else {
+ /* For safety, clear the interrupt once more */
+ ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA);
+
+ /* Wait for raw interrupt to clear */
+ while ((readl(keypad->reg_base + SKE_RIS) & SKE_KPRISA) &&
+ --timeout) {
+ udelay(10);
+ }
+
+ if (!timeout)
+ dev_err(keypad->dev,
+ "%s Timeed out waiting on irq to clear\n",
+ __func__);
+
+ /* enable auto scan interrupts */
+ ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+
+ /*
+ * Schedule the work queue to change it to GPIO mode
+ * if there is no activity in SKE mode
+ */
+ if (!keypad->key_pressed && keypad->enable)
+ schedule_delayed_work(&keypad->work,
+ keypad->board->switch_delay);
}
}
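+/*
+ * ske_gpio_switch_work() - delayed work that drops the keypad back to
+ * low-power GPIO wakeup mode after a period of SKE inactivity.
+ */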
+static void ske_gpio_switch_work(struct work_struct *work)
+{
+ struct ske_keypad *keypad = container_of(work,
+ struct ske_keypad, work.work);
+
+ ske_mode_enable(keypad, false);
+ keypad->enable = false;
+}
+
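+/*
+ * ske_gpio_release_work() - report the key found by the manual GPIO
+ * scan as a press immediately followed by a release.
+ */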
+static void ske_gpio_release_work(struct work_struct *work)
+{
+ int code;
+ struct ske_keypad *keypad = container_of(work,
+ struct ske_keypad, gpio_work.work);
+ struct input_dev *input = keypad->input;
+
+ code = MATRIX_SCAN_CODE(keypad->gpio_row, keypad->gpio_col,
+ SKE_KEYPAD_ROW_SHIFT);
+
+ dev_dbg(keypad->dev, "%s Key press reported, code:%d (key %d)\n",
+ __func__, code, keypad->keymap[code]);
+
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, keypad->keymap[code], 1);
+ input_sync(input);
+ input_report_key(input, keypad->keymap[code], 0);
+ input_sync(input);
+}
+
+static int ske_read_get_gpio_row(struct ske_keypad *keypad)
+{
+ int row;
+ int value = 0;
+ int ret;
+
+ /* read all rows GPIO data register values */
+ for (row = 0; row < keypad->board->kconnected_rows; row++) {
+ ret = gpio_get_value(keypad->ske_rows[row]);
+ value += (1 << row) * ret;
+ }
+
+ /* get the exact row */
+ for (row = 0; row < keypad->board->kconnected_rows; row++) {
+ if (((1 << row) & value) == 0)
+ return row;
+ }
+
+ return -1;
+}
+
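+/*
+ * Manual GPIO scan: ske_set_cols() drives a single column output low
+ * and ske_read_get_gpio_row() looks for the row input pulled low, so a
+ * pressed key can be located while the SKE block is powered down.
+ */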
+static void ske_set_cols(struct ske_keypad *keypad, int col)
+{
+ int i;
+ int value;
+
+ /*
+ * Drive all column output pins high except the
+ * requested column, which is driven low.
+ */
+ for (i = 0; i < keypad->board->kconnected_cols; i++) {
+ if (i == col)
+ value = 0;
+ else
+ value = 1;
+ gpio_request(keypad->ske_cols[i], "ske-kp");
+ gpio_direction_output(keypad->ske_cols[i], value);
+ gpio_free(keypad->ske_cols[i]);
+ }
+}
+
+static void ske_free_cols(struct ske_keypad *keypad)
+{
+ int i;
+
+ for (i = 0; i < keypad->board->kconnected_cols; i++) {
+ gpio_request(keypad->ske_cols[i], "ske-kp");
+ gpio_direction_output(keypad->ske_cols[i], 0);
+ gpio_free(keypad->ske_cols[i]);
+ }
+}
+
+static void ske_manual_scan(struct ske_keypad *keypad)
+{
+ int row;
+ int col;
+
+ for (col = 0; col < keypad->board->kconnected_cols; col++) {
+ ske_set_cols(keypad, col);
+ row = ske_read_get_gpio_row(keypad);
+ if (row >= 0) {
+ keypad->key_pressed = 1;
+ keypad->gpio_row = row;
+ keypad->gpio_col = col;
+ break;
+ }
+ }
+ ske_free_cols(keypad);
+}
+
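+/*
+ * GPIO wakeup interrupt: a falling edge on a row line means a key was
+ * pressed in GPIO mode; scan for it manually, switch back to SKE mode
+ * and schedule the delayed release/report work.
+ */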
+static irqreturn_t ske_keypad_gpio_irq(int irq, void *dev_id)
+{
+ struct ske_keypad *keypad = dev_id;
+
+ if (!gpio_get_value(NOMADIK_IRQ_TO_GPIO(irq))) {
+ ske_manual_scan(keypad);
+ if (!keypad->enable) {
+ keypad->enable = true;
+ ske_mode_enable(keypad, true);
+ /*
+ * Schedule the work queue to change it back to GPIO
+ * mode if there is no activity in SKE mode
+ */
+ schedule_delayed_work(&keypad->work,
+ keypad->board->switch_delay);
+ }
+ /*
+ * Schedule delayed work to report key press if it is not
+ * detected in SKE mode.
+ */
+ if (keypad->key_pressed)
+ schedule_delayed_work(&keypad->gpio_work,
+ KEY_PRESSED_DELAY);
+ }
+
+ return IRQ_HANDLED;
+}
static irqreturn_t ske_keypad_irq(int irq, void *dev_id)
{
struct ske_keypad *keypad = dev_id;
- int retries = 20;
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->work);
/* disable auto scan interrupt; mask the interrupt generated */
- ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
+ ske_keypad_set_bits(keypad, SKE_IMSC, SKE_KPIMA, 0x0);
ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA);
- while ((readl(keypad->reg_base + SKE_CR) & SKE_KPASON) && --retries)
- msleep(5);
-
- if (retries) {
- /* SKEx registers are stable and can be read */
- ske_keypad_read_data(keypad);
- }
-
- /* enable auto scan interrupts */
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+ schedule_delayed_work(&keypad->scan_work, 0);
return IRQ_HANDLED;
}
static int __init ske_keypad_probe(struct platform_device *pdev)
{
- const struct ske_keypad_platform_data *plat = pdev->dev.platform_data;
struct ske_keypad *keypad;
+ struct resource *res = NULL;
struct input_dev *input;
- struct resource *res;
+ struct clk *clk;
+ void __iomem *reg_base;
+ int ret = 0;
int irq;
- int error;
+ int i;
+ struct ske_keypad_platform_data *plat = pdev->dev.platform_data;
if (!plat) {
dev_err(&pdev->dev, "invalid keypad platform data\n");
@@ -219,42 +582,56 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
+ if (res == NULL) {
dev_err(&pdev->dev, "missing platform resources\n");
- return -EINVAL;
+ return -ENXIO;
}
- keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL);
- input = input_allocate_device();
- if (!keypad || !input) {
- dev_err(&pdev->dev, "failed to allocate keypad memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ return -EBUSY;
}
- keypad->irq = irq;
- keypad->board = plat;
- keypad->input = input;
- spin_lock_init(&keypad->ske_keypad_lock);
+ reg_base = ioremap(res->start, resource_size(res));
+ if (!reg_base) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto out_freerequest_memregions;
+ }
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto err_free_mem;
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to clk_get\n");
+ ret = PTR_ERR(clk);
+ goto out_freeioremap;
}
- keypad->reg_base = ioremap(res->start, resource_size(res));
- if (!keypad->reg_base) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -ENXIO;
- goto err_free_mem_region;
+ /* resources are sane; we begin allocation */
+ keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL);
+ if (!keypad) {
+ dev_err(&pdev->dev, "failed to allocate keypad memory\n");
+ ret = -ENOMEM;
+ goto out_freeclk;
}
+ keypad->dev = &pdev->dev;
- keypad->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(keypad->clk)) {
- dev_err(&pdev->dev, "failed to get clk\n");
- error = PTR_ERR(keypad->clk);
- goto err_iounmap;
+ input = input_allocate_device();
+ if (!input) {
+ dev_err(&pdev->dev, "failed to input_allocate_device\n");
+ ret = -ENOMEM;
+ goto out_freekeypad;
+ }
+ keypad->regulator = regulator_get(&pdev->dev, "v-ape");
+ if (IS_ERR(keypad->regulator)) {
+ dev_err(&pdev->dev, "regulator_get failed\n");
+ ret = PTR_ERR(keypad->regulator);
+ keypad->regulator = NULL;
+ goto out_regulator_get;
+ } else {
+ ret = regulator_enable(keypad->regulator);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "regulator_enable failed\n");
+ goto out_regulator_enable;
+ }
}
input->id.bustype = BUS_HOST;
@@ -266,38 +643,153 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
input->keycodemax = ARRAY_SIZE(keypad->keymap);
input_set_capability(input, EV_MSC, MSC_SCAN);
+ input_set_drvdata(input, keypad);
__set_bit(EV_KEY, input->evbit);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
matrix_keypad_build_keymap(plat->keymap_data, SKE_KEYPAD_ROW_SHIFT,
- input->keycode, input->keybit);
+ input->keycode, input->keybit);
+
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to register input device: %d\n", ret);
+ goto out_freeinput;
+ }
+
+ keypad->irq = irq;
+ keypad->board = plat;
+ keypad->input = input;
+ keypad->reg_base = reg_base;
+ keypad->clk = clk;
+ INIT_DELAYED_WORK(&keypad->work, ske_gpio_switch_work);
+ INIT_DELAYED_WORK(&keypad->gpio_work, ske_gpio_release_work);
+ INIT_DELAYED_WORK(&keypad->scan_work, ske_keypad_scan_work);
+ /* allocations are sane, we begin HW initialization */
clk_enable(keypad->clk);
- /* go through board initialization helpers */
- if (keypad->board->init)
- keypad->board->init();
+ if (!keypad->board->init) {
+ dev_err(&pdev->dev, "init funtion not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
- error = ske_keypad_chip_init(keypad);
- if (error) {
- dev_err(&pdev->dev, "unable to init keypad hardware\n");
- goto err_clk_disable;
+ if (keypad->board->init() < 0) {
+ dev_err(&pdev->dev, "keyboard init config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
}
- error = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq,
- IRQF_ONESHOT, "ske-keypad", keypad);
- if (error) {
- dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
- goto err_clk_disable;
+ if (!keypad->board->exit) {
+ dev_err(&pdev->dev, "exit funtion not defined\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (keypad->board->exit() < 0) {
+ dev_err(&pdev->dev, "keyboard exit config failed\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ if (plat->kconnected_rows == 0) {
+ /*
+ * Board config data does not specify the number of connected
+ * rows and columns; assume that it matches the specified max
+ * values.
+ */
+ plat->kconnected_rows = plat->krow;
+ plat->kconnected_cols = plat->kcol;
+ }
+
+ /* this driver doesn't currently support non-square keypads */
+ if (plat->kconnected_rows != plat->kconnected_cols) {
+ dev_err(&pdev->dev,
+ "invalid keypad configuration (not square)\n");
+ ret = -EINVAL;
+ goto out_unregisterinput;
}
- error = input_register_device(input);
- if (error) {
+ if (plat->kconnected_rows > SKE_KPD_MAX_ROWS ||
+ plat->kconnected_cols > SKE_KPD_MAX_COLS) {
dev_err(&pdev->dev,
- "unable to register input device: %d\n", error);
- goto err_free_irq;
+ "invalid keypad configuration (too many rows/cols)\n"),
+ ret = -EINVAL;
+ goto out_unregisterinput;
+ }
+
+ keypad->gpio_input_irq = kmalloc(sizeof(*keypad->gpio_input_irq) *
+ plat->kconnected_rows,
+ GFP_KERNEL);
+ if (!keypad->gpio_input_irq) {
+ dev_err(&pdev->dev, "failed to allocate input_irq memory\n");
+ ret = -ENOMEM;
+ goto out_unregisterinput;
+ }
+
+ keypad->ske_rows = kmalloc(sizeof(*keypad->ske_rows) *
+ plat->kconnected_rows, GFP_KERNEL);
+ if (!keypad->ske_rows) {
+ dev_err(&pdev->dev, "failed to allocate ske_rows memory\n");
+ ret = -ENOMEM;
+ goto out_freemem_input_irq;
+ }
+
+ keypad->ske_cols = kmalloc(sizeof(*keypad->ske_cols) *
+ plat->kconnected_cols, GFP_KERNEL);
+ if (!keypad->ske_cols) {
+ dev_err(&pdev->dev, "failed to allocate ske_cols memory\n");
+ ret = -ENOMEM;
+ goto out_freemem_rows;
+ }
+
+ keypad->keys = kzalloc(sizeof(*keypad->keys) * plat->krow, GFP_KERNEL);
+ if (!keypad->keys) {
+ dev_err(&pdev->dev, "failed to allocate keys:rows memory\n");
+ ret = -ENOMEM;
+ goto out_freemem_cols;
+ }
+ for (i = 0; i < plat->krow; i++) {
+ keypad->keys[i] = kzalloc(sizeof(*keypad->keys[i]) *
+ plat->kcol, GFP_KERNEL);
+ if (!keypad->keys[i]) {
+ dev_err(&pdev->dev,
+ "failed to allocate keys:cols memory\n");
+ ret = -ENOMEM;
+ goto out_freemem_keys;
+ }
+ }
+
+ for (i = 0; i < plat->kconnected_rows; i++) {
+ keypad->ske_rows[i] = plat->gpio_input_pins[i];
+ keypad->ske_cols[i] = plat->gpio_output_pins[i];
+ keypad->gpio_input_irq[i] =
+ NOMADIK_GPIO_TO_IRQ(keypad->ske_rows[i]);
+ }
+
+ for (i = 0; i < keypad->board->kconnected_rows; i++) {
+ ret = request_threaded_irq(keypad->gpio_input_irq[i],
+ NULL, ske_keypad_gpio_irq,
+ IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND,
+ "ske-keypad-gpio", keypad);
+ if (ret) {
+ dev_err(&pdev->dev, "allocate gpio irq %d failed\n",
+ keypad->gpio_input_irq[i]);
+ goto out_freemem_keys;
+ }
+ enable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+
+ ret = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq,
+ IRQF_ONESHOT, "ske-keypad", keypad);
+ if (ret) {
+ dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
+ goto out_freemem_keys;
+ }
+
+ /* sysfs interface to dynamically enable/disable the input events */
+ ret = sysfs_create_group(&pdev->dev.kobj, &ske_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create sysfs entries\n");
+ goto out_free_irq;
}
if (plat->wakeup_enable)
@@ -305,37 +797,80 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, keypad);
+ clk_disable(keypad->clk);
+ regulator_disable(keypad->regulator);
+
return 0;
-err_free_irq:
+out_free_irq:
free_irq(keypad->irq, keypad);
-err_clk_disable:
+out_freemem_keys:
+ for (i = 0; i < plat->krow; i++)
+ kfree(keypad->keys[i]);
+ kfree(keypad->keys);
+out_freemem_cols:
+ kfree(keypad->ske_cols);
+out_freemem_rows:
+ kfree(keypad->ske_rows);
+out_freemem_input_irq:
+ kfree(keypad->gpio_input_irq);
+out_unregisterinput:
+ input_unregister_device(input);
+ input = NULL;
clk_disable(keypad->clk);
- clk_put(keypad->clk);
-err_iounmap:
- iounmap(keypad->reg_base);
-err_free_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
+out_freeinput:
+ regulator_disable(keypad->regulator);
+out_regulator_enable:
+ regulator_put(keypad->regulator);
+out_regulator_get:
input_free_device(input);
+out_freekeypad:
kfree(keypad);
- return error;
+out_freeclk:
+ clk_put(clk);
+out_freeioremap:
+ iounmap(reg_base);
+out_freerequest_memregions:
+ release_mem_region(res->start, resource_size(res));
+ return ret;
}
static int __devexit ske_keypad_remove(struct platform_device *pdev)
{
struct ske_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int i;
- free_irq(keypad->irq, keypad);
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->work);
+ cancel_delayed_work_sync(&keypad->scan_work);
- input_unregister_device(keypad->input);
+ for (i = 0; i < keypad->board->krow; i++)
+ kfree(keypad->keys[i]);
+ kfree(keypad->keys);
+ kfree(keypad->ske_cols);
+ kfree(keypad->ske_rows);
- clk_disable(keypad->clk);
+ input_unregister_device(keypad->input);
+ sysfs_remove_group(&pdev->dev.kobj, &ske_attr_group);
+ if (keypad->enable)
+ clk_disable(keypad->clk);
clk_put(keypad->clk);
- if (keypad->board->exit)
+ if (keypad->enable && keypad->board->exit)
keypad->board->exit();
+ else {
+ for (i = 0; i < keypad->board->kconnected_rows; i++) {
+ disable_irq_nosync(keypad->gpio_input_irq[i]);
+ disable_irq_wake(keypad->gpio_input_irq[i]);
+ }
+ }
+ for (i = 0; i < keypad->board->kconnected_rows; i++)
+ free_irq(keypad->gpio_input_irq[i], keypad);
+
+ kfree(keypad->gpio_input_irq);
+ free_irq(keypad->irq, keypad);
+ regulator_put(keypad->regulator);
iounmap(keypad->reg_base);
release_mem_region(res->start, resource_size(res));
@@ -353,8 +888,19 @@ static int ske_keypad_suspend(struct device *dev)
if (device_may_wakeup(dev))
enable_irq_wake(irq);
- else
- ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0);
+ else {
+ cancel_delayed_work_sync(&keypad->gpio_work);
+ cancel_delayed_work_sync(&keypad->work);
+ cancel_delayed_work_sync(&keypad->scan_work);
+ disable_irq(irq);
+
+ keypad->enable_on_resume = keypad->enable;
+
+ if (keypad->enable) {
+ ske_mode_enable(keypad, false);
+ keypad->enable = false;
+ }
+ }
return 0;
}
@@ -367,8 +913,20 @@ static int ske_keypad_resume(struct device *dev)
if (device_may_wakeup(dev))
disable_irq_wake(irq);
- else
- ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA);
+ else {
+ if (keypad->enable_on_resume && !keypad->enable) {
+ keypad->enable = true;
+ ske_mode_enable(keypad, true);
+ /*
+ * Schedule the work queue to change it to GPIO mode
+ * if there is no activity in SKE mode
+ */
+ if (!keypad->key_pressed)
+ schedule_delayed_work(&keypad->work,
+ keypad->board->switch_delay);
+ }
+ enable_irq(irq);
+ }
return 0;
}
@@ -399,6 +957,6 @@ static void __exit ske_keypad_exit(void)
module_exit(ske_keypad_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com>");
MODULE_DESCRIPTION("Nomadik Scroll-Key-Encoder Keypad Driver");
MODULE_ALIAS("platform:nomadik-ske-keypad");
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index 9397cf9c625..892335275dd 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -108,10 +108,52 @@ struct stmpe_keypad {
unsigned int rows;
unsigned int cols;
+ bool enable;
unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
};
+static ssize_t stmpe_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ return sprintf(buf, "%d\n", keypad->enable);
+}
+
+static ssize_t stmpe_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ struct stmpe *stmpe = keypad->stmpe;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (keypad->enable != val) {
+ keypad->enable = val;
+ if (!val)
+ stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
+ else
+ stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
+ }
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ stmpe_show_attr_enable, stmpe_store_attr_enable);
+
+static struct attribute *stmpe_keypad_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group stmpe_attr_group = {
+ .attrs = stmpe_keypad_attrs,
+};
+
static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
{
const struct stmpe_keypad_variant *variant = keypad->variant;
@@ -285,7 +327,7 @@ static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
goto out_freekeypad;
}
- input->name = "STMPE keypad";
+ input->name = "STMPE-keypad";
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
@@ -332,10 +374,20 @@ static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
goto out_unregisterinput;
}
+ /* sysfs interface to dynamically enable/disable the input events */
+ ret = sysfs_create_group(&pdev->dev.kobj, &stmpe_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create sysfs entries\n");
+ goto out_free_irq;
+ }
+
+ keypad->enable = true;
platform_set_drvdata(pdev, keypad);
return 0;
+out_free_irq:
+ free_irq(irq, keypad);
out_unregisterinput:
input_unregister_device(input);
input = NULL;
@@ -354,6 +406,7 @@ static int __devexit stmpe_keypad_remove(struct platform_device *pdev)
stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
+ sysfs_remove_group(&pdev->dev.kobj, &stmpe_attr_group);
free_irq(irq, keypad);
input_unregister_device(keypad->input);
platform_set_drvdata(pdev, NULL);
@@ -362,9 +415,43 @@ static int __devexit stmpe_keypad_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
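+/*
+ * When the STMPE is not a wakeup source, the keypad block is simply
+ * powered off across suspend and re-enabled on resume.
+ */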
+static int stmpe_keypad_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ struct stmpe *stmpe = keypad->stmpe;
+
+ if (!device_may_wakeup(stmpe->dev))
+ stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD);
+
+ return 0;
+}
+
+static int stmpe_keypad_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
+ struct stmpe *stmpe = keypad->stmpe;
+
+ if (!device_may_wakeup(stmpe->dev))
+ stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
+
+ return 0;
+}
+
+static const struct dev_pm_ops stmpe_keypad_dev_pm_ops = {
+ .suspend = stmpe_keypad_suspend,
+ .resume = stmpe_keypad_resume,
+};
+#endif
+
static struct platform_driver stmpe_keypad_driver = {
.driver.name = "stmpe-keypad",
.driver.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .driver.pm = &stmpe_keypad_dev_pm_ops,
+#endif
.probe = stmpe_keypad_probe,
.remove = __devexit_p(stmpe_keypad_remove),
};
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7faf4a7fcaa..dedd5d6cf7a 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -22,12 +22,26 @@ config INPUT_88PM860X_ONKEY
To compile this driver as a module, choose M here: the module
will be called 88pm860x_onkey.
+config INPUT_AB8500_ACCDET
+ bool "AB8500 AV Accessory detection"
+ depends on AB8500_CORE && AB8500_GPADC && GPIO_AB8500
+ help
+ Say Y here to enable AV accessory detection features for ST-Ericsson's
+ AB8500 Mix-Sig PMIC.
+
+config INPUT_AB5500_ACCDET
+ bool "AB5500 AV Accessory detection"
+ depends on AB5500_CORE && AB5500_GPADC
+ help
+ Say Y here to enable AV accessory detection features for ST-Ericsson's
+ AB5500 Mix-Sig PMIC.
+
config INPUT_AB8500_PONKEY
- tristate "AB8500 Pon (PowerOn) Key"
- depends on AB8500_CORE
+ tristate "AB5500/AB8500 Pon (PowerOn) Key"
+ depends on AB5500_CORE || AB8500_CORE
help
- Say Y here to use the PowerOn Key for ST-Ericsson's AB8500
- Mix-Sig PMIC.
+ Say Y here to use the PowerOn Key for ST-Ericsson's AB5500/AB8500
+ Mix-Sig PMICs.
To compile this driver as a module, choose M here: the module
will be called ab8500-ponkey.
@@ -590,4 +604,14 @@ config INPUT_XEN_KBDDEV_FRONTEND
To compile this driver as a module, choose M here: the
module will be called xen-kbdfront.
+config INPUT_STE_FF_VIBRA
+ tristate "ST-Ericsson Force Feedback Vibrator"
+ depends on STE_AUDIO_IO_DEV
+ select INPUT_FF_MEMLESS
+ help
+ This option enables support for ST-Ericsson's Vibrator which
+ registers as an input force feedback driver.
+
+ To compile this driver as a module, choose M here. The module will
+ be called ste_ff_vibra.
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f55cdf4916f..e9aa2d854bc 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -5,6 +5,8 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o
+obj-$(CONFIG_INPUT_AB8500_ACCDET) += abx500-accdet.o ab8500-accdet.o
+obj-$(CONFIG_INPUT_AB5500_ACCDET) += abx500-accdet.o ab5500-accdet.o
obj-$(CONFIG_INPUT_AB8500_PONKEY) += ab8500-ponkey.o
obj-$(CONFIG_INPUT_AD714X) += ad714x.o
obj-$(CONFIG_INPUT_AD714X_I2C) += ad714x-i2c.o
@@ -55,3 +57,4 @@ obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
+obj-$(CONFIG_INPUT_STE_FF_VIBRA) += ste_ff_vibra.o
diff --git a/drivers/input/misc/ab5500-accdet.c b/drivers/input/misc/ab5500-accdet.c
new file mode 100644
index 00000000000..fa8e2523126
--- /dev/null
+++ b/drivers/input/misc/ab5500-accdet.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GPL V2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/input/abx500-accdet.h>
+
+/*
+ * Register definition for accessory detection.
+ */
+#define AB5500_REGU_CTRL1_SPARE_REG 0x84
+#define AB5500_ACC_DET_DB1_REG 0x20
+#define AB5500_ACC_DET_DB2_REG 0x21
+#define AB5500_ACC_DET_CTRL_REG 0x23
+#define AB5500_VDENC_CTRL0 0x80
+
+/* REGISTER: AB8500_ACC_DET_CTRL_REG */
+#define BITS_ACCDETCTRL2_ENA (0x20 | 0x10 | 0x08)
+#define BITS_ACCDETCTRL1_ENA (0x02 | 0x01)
+
+/* REGISTER: AB8500_REGU_CTRL1_SPARE_REG */
+#define BIT_REGUCTRL1SPARE_VAMIC1_GROUND 0x01
+
+/* REGISTER: AB8500_IT_SOURCE5_REG */
+#define BIT_ITSOURCE5_ACCDET1 0x02
+
+static struct accessory_irq_descriptor ab5500_irq_desc[] = {
+ {
+ .irq = PLUG_IRQ,
+ .name = "acc_detedt1db_falling",
+ .isr = plug_irq_handler,
+ },
+ {
+ .irq = UNPLUG_IRQ,
+ .name = "acc_detedt1db_rising",
+ .isr = unplug_irq_handler,
+ },
+ {
+ .irq = BUTTON_PRESS_IRQ,
+ .name = "acc_detedt21db_falling",
+ .isr = button_press_irq_handler,
+ },
+ {
+ .irq = BUTTON_RELEASE_IRQ,
+ .name = "acc_detedt21db_rising",
+ .isr = button_release_irq_handler,
+ },
+};
+
+static struct accessory_regu_descriptor ab5500_regu_desc[] = {
+ {
+ .id = REGULATOR_VAMIC1,
+ .name = "v-amic",
+ },
+};
+
+
+/*
+ * configures accdet2 input on/off
+ */
+static void ab5500_config_accdetect2_hw(struct abx500_ad *dd, int enable)
+{
+ int ret = 0;
+
+ if (!dd->accdet2_th_set) {
+ /* Configure accdetect21+22 thresholds */
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_DB2_REG,
+ dd->pdata->accdet2122_th);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ goto out;
+ } else {
+ dd->accdet2_th_set = 1;
+ }
+ }
+
+ /* Enable/Disable accdetect21 comparators + pullup */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL2_ENA,
+ enable ? BITS_ACCDETCTRL2_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev, "%s: Failed to update reg (%d).\n",
+ __func__, ret);
+out:
+ return;
+}
+
+/*
+ * configures accdet1 input on/off
+ */
+static void ab5500_config_accdetect1_hw(struct abx500_ad *dd, int enable)
+{
+ int ret;
+
+ if (!dd->accdet1_th_set) {
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_DB1_REG,
+ dd->pdata->accdet1_dbth);
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ else
+ dd->accdet1_th_set = 1;
+ }
+
+ /* enable accdetect1 comparator */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL1_ENA,
+ enable ? BITS_ACCDETCTRL1_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+}
+
+/*
+ * returns the high level status whether some accessory is connected (1|0).
+ */
+static int ab5500_detect_plugged_in(struct abx500_ad *dd)
+{
+ u8 value = 0;
+
+ int status = abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_IT,
+ AB5500_IT_SOURCE3_REG,
+ &value);
+ if (status < 0) {
+ dev_err(&dd->pdev->dev, "%s: reg read failed (%d).\n",
+ __func__, status);
+ return 0;
+ }
+
+ if (dd->pdata->is_detection_inverted)
+ return value & BIT_ITSOURCE5_ACCDET1 ? 1 : 0;
+ else
+ return value & BIT_ITSOURCE5_ACCDET1 ? 0 : 1;
+}
+
+/*
+ * ab5500_meas_voltage_stable - measures a relatively stable voltage from the specified input
+ */
+static int ab5500_meas_voltage_stable(struct abx500_ad *dd)
+{
+ int iterations = 2;
+ int v1, v2, dv;
+
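+ /*
+ * Sample ACC_DETECT2 until two consecutive readings differ by at
+ * most MAX_VOLT_DIFF mV or the iteration budget is exhausted, and
+ * return the last reading.
+ */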
+ v1 = ab5500_gpadc_convert((struct ab5500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ do {
+ msleep(1);
+ --iterations;
+ v2 = ab5500_gpadc_convert((struct ab5500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ dv = abs(v2 - v1);
+ v1 = v2;
+ } while (iterations > 0 && dv > MAX_VOLT_DIFF);
+
+ return v1;
+}
+
+/*
+ * not implemented
+ */
+static int ab5500_meas_alt_voltage_stable(struct abx500_ad *dd)
+{
+ return -1;
+}
+
+/*
+ * configures HW so that it is possible to make decision whether
+ * accessory is connected or not.
+ */
+static void ab5500_config_hw_test_plug_connected(struct abx500_ad *dd,
+ int enable)
+{
+ dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable);
+
+ /* enable mic BIAS2 */
+ if (enable)
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+}
+
+/*
+ * configures HW so that carkit/headset detection can be accomplished.
+ */
+static void ab5500_config_hw_test_basic_carkit(struct abx500_ad *dd, int enable)
+{
+ /* disable mic bias while testing for a carkit */
+ if (enable)
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1);
+}
+
+static u8 acc_det_ctrl_suspend_val;
+
+static void ab5500_turn_off_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn off AccDetect comparators and pull-up */
+ (void) abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ &acc_det_ctrl_suspend_val);
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ 0);
+}
+
+static void ab5500_turn_on_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn on AccDetect comparators and pull-up */
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_ACC_DET_CTRL_REG,
+ acc_det_ctrl_suspend_val);
+}
+
+static void *ab5500_accdet_abx500_gpadc_get(void)
+{
+ return ab5500_gpadc_get("ab5500-adc.0");
+}
+
+struct abx500_accdet_platform_data *
+ ab5500_get_platform_data(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+
+struct abx500_ad ab5500_accessory_det_callbacks = {
+ .irq_desc_norm = ab5500_irq_desc,
+ .irq_desc_inverted = NULL,
+ .no_irqs = ARRAY_SIZE(ab5500_irq_desc),
+ .regu_desc = ab5500_regu_desc,
+ .no_of_regu_desc = ARRAY_SIZE(ab5500_regu_desc),
+ .config_accdetect2_hw = ab5500_config_accdetect2_hw,
+ .config_accdetect1_hw = ab5500_config_accdetect1_hw,
+ .detect_plugged_in = ab5500_detect_plugged_in,
+ .meas_voltage_stable = ab5500_meas_voltage_stable,
+ .meas_alt_voltage_stable = ab5500_meas_alt_voltage_stable,
+ .config_hw_test_basic_carkit = ab5500_config_hw_test_basic_carkit,
+ .turn_off_accdet_comparator = ab5500_turn_off_accdet_comparator,
+ .turn_on_accdet_comparator = ab5500_turn_on_accdet_comparator,
+ .accdet_abx500_gpadc_get = ab5500_accdet_abx500_gpadc_get,
+ .config_hw_test_plug_connected = ab5500_config_hw_test_plug_connected,
+ .set_av_switch = NULL,
+ .get_platform_data = ab5500_get_platform_data,
+};
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/ab8500-accdet.c b/drivers/input/misc/ab8500-accdet.c
new file mode 100644
index 00000000000..1b96b6b3fef
--- /dev/null
+++ b/drivers/input/misc/ab8500-accdet.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GPL V2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/abx500.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/mfd/abx500/ab8500-gpio.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/input/abx500-accdet.h>
+#ifdef CONFIG_SND_SOC_UX500_AB8500
+#include <sound/ux500_ab8500_ext.h>
+#endif
+
+#define MAX_DET_COUNT 10
+#define MAX_VOLT_DIFF 30
+#define MIN_MIC_POWER -100
+
+/* Unique value used to identify Headset button input device */
+#define BTN_INPUT_UNIQUE_VALUE "AB8500HsBtn"
+#define BTN_INPUT_DEV_NAME "AB8500 Hs Button"
+
+#define DEBOUNCE_PLUG_EVENT_MS 100
+#define DEBOUNCE_PLUG_RETEST_MS 25
+#define DEBOUNCE_UNPLUG_EVENT_MS 0
+
+/*
+ * Register definition for accessory detection.
+ */
+#define AB8500_REGU_CTRL1_SPARE_REG 0x84
+#define AB8500_ACC_DET_DB1_REG 0x80
+#define AB8500_ACC_DET_DB2_REG 0x81
+#define AB8500_ACC_DET_CTRL_REG 0x82
+#define AB8500_IT_SOURCE5_REG 0x04
+
+/* REGISTER: AB8500_ACC_DET_CTRL_REG */
+#define BITS_ACCDETCTRL2_ENA (0x20 | 0x10 | 0x08)
+#define BITS_ACCDETCTRL1_ENA (0x02 | 0x01)
+
+/* REGISTER: AB8500_REGU_CTRL1_SPARE_REG */
+#define BIT_REGUCTRL1SPARE_VAMIC1_GROUND 0x01
+
+/* REGISTER: AB8500_IT_SOURCE5_REG */
+#define BIT_ITSOURCE5_ACCDET1 0x04
+
+/* After being loaded, how fast the first check is to be made */
+#define INIT_DELAY_MS 3000
+
+/* Voltage limits (mV) for various types of AV Accessories */
+#define ACCESSORY_DET_VOL_DONTCARE -1
+#define ACCESSORY_HEADPHONE_DET_VOL_MIN 0
+#define ACCESSORY_HEADPHONE_DET_VOL_MAX 40
+#define ACCESSORY_CARKIT_DET_VOL_MIN 1100
+#define ACCESSORY_CARKIT_DET_VOL_MAX 1300
+#define ACCESSORY_HEADSET_DET_VOL_MIN 0
+#define ACCESSORY_HEADSET_DET_VOL_MAX 200
+#define ACCESSORY_OPENCABLE_DET_VOL_MIN 1730
+#define ACCESSORY_OPENCABLE_DET_VOL_MAX 2150
+
+/* Static data initialization */
+
+static struct accessory_regu_descriptor ab8500_regu_desc[3] = {
+ {
+ .id = REGULATOR_VAUDIO,
+ .name = "v-audio",
+ },
+ {
+ .id = REGULATOR_VAMIC1,
+ .name = "v-amic1",
+ },
+ {
+ .id = REGULATOR_AVSWITCH,
+ .name = "vcc-N2158",
+ },
+};
+
+static struct accessory_irq_descriptor ab8500_irq_desc_norm[] = {
+ {
+ .irq = PLUG_IRQ,
+ .name = "ACC_DETECT_1DB_F",
+ .isr = plug_irq_handler,
+ },
+ {
+ .irq = UNPLUG_IRQ,
+ .name = "ACC_DETECT_1DB_R",
+ .isr = unplug_irq_handler,
+ },
+ {
+ .irq = BUTTON_PRESS_IRQ,
+ .name = "ACC_DETECT_22DB_F",
+ .isr = button_press_irq_handler,
+ },
+ {
+ .irq = BUTTON_RELEASE_IRQ,
+ .name = "ACC_DETECT_22DB_R",
+ .isr = button_release_irq_handler,
+ },
+};
+
+static struct accessory_irq_descriptor ab8500_irq_desc_inverted[] = {
+ {
+ .irq = PLUG_IRQ,
+ .name = "ACC_DETECT_1DB_R",
+ .isr = plug_irq_handler,
+ },
+ {
+ .irq = UNPLUG_IRQ,
+ .name = "ACC_DETECT_1DB_F",
+ .isr = unplug_irq_handler,
+ },
+ {
+ .irq = BUTTON_PRESS_IRQ,
+ .name = "ACC_DETECT_22DB_R",
+ .isr = button_press_irq_handler,
+ },
+ {
+ .irq = BUTTON_RELEASE_IRQ,
+ .name = "ACC_DETECT_22DB_F",
+ .isr = button_release_irq_handler,
+ },
+};
+
+/*
+ * configures accdet2 input on/off
+ */
+static void ab8500_config_accdetect2_hw(struct abx500_ad *dd, int enable)
+{
+ int ret = 0;
+
+ if (!dd->accdet2_th_set) {
+ /* Configure accdetect21+22 thresholds */
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_DB2_REG,
+ dd->pdata->accdet2122_th);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ goto out;
+ } else {
+ dd->accdet2_th_set = 1;
+ }
+ }
+
+ /* Enable/Disable accdetect21 comparators + pullup */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL2_ENA,
+ enable ? BITS_ACCDETCTRL2_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev, "%s: Failed to update reg (%d).\n",
+ __func__, ret);
+
+out:
+ return;
+}
+
+/*
+ * configures accdet1 input on/off
+ */
+static void ab8500_config_accdetect1_hw(struct abx500_ad *dd, int enable)
+{
+ int ret;
+
+ if (!dd->accdet1_th_set) {
+ ret = abx500_set_register_interruptible(&dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_DB1_REG,
+ dd->pdata->accdet1_dbth);
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to write reg (%d).\n", __func__,
+ ret);
+ else
+ dd->accdet1_th_set = 1;
+ }
+
+ /* enable accdetect1 comparator */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ BITS_ACCDETCTRL1_ENA,
+ enable ? BITS_ACCDETCTRL1_ENA : 0);
+
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+}
+
+/*
+ * returns the high level status whether some accessory is connected (1|0).
+ */
+static int ab8500_detect_plugged_in(struct abx500_ad *dd)
+{
+ u8 value = 0;
+
+ int status = abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_INTERRUPT,
+ AB8500_IT_SOURCE5_REG,
+ &value);
+ if (status < 0) {
+ dev_err(&dd->pdev->dev, "%s: reg read failed (%d).\n",
+ __func__, status);
+ return 0;
+ }
+
+ if (dd->pdata->is_detection_inverted)
+ return value & BIT_ITSOURCE5_ACCDET1 ? 1 : 0;
+ else
+ return value & BIT_ITSOURCE5_ACCDET1 ? 0 : 1;
+}
+
+#ifdef CONFIG_SND_SOC_UX500_AB8500
+
+/*
+ * meas_voltage_stable - measures a relatively stable voltage from the specified input
+ */
+static int ab8500_meas_voltage_stable(struct abx500_ad *dd)
+{
+ int ret, mv;
+
+ ret = ux500_ab8500_audio_gpadc_measure((struct ab8500_gpadc *)dd->gpadc,
+ ACC_DETECT2, false, &mv);
+
+ return (ret < 0) ? ret : mv;
+}
+
+/*
+ * meas_alt_voltage_stable - measures a relatively stable voltage from the specified input
+ */
+static int ab8500_meas_alt_voltage_stable(struct abx500_ad *dd)
+{
+ int ret, mv;
+
+ ret = ux500_ab8500_audio_gpadc_measure((struct ab8500_gpadc *)dd->gpadc,
+ ACC_DETECT2, true, &mv);
+
+ return (ret < 0) ? ret : mv;
+}
+
+#else
+
+/*
+ * meas_voltage_stable - measures a relatively stable voltage from the specified input
+ */
+static int ab8500_meas_voltage_stable(struct abx500_ad *dd)
+{
+ int iterations = 2;
+ int v1, v2, dv;
+
+ v1 = ab8500_gpadc_convert((struct ab8500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ do {
+ msleep(1);
+ --iterations;
+ v2 = ab8500_gpadc_convert((struct ab8500_gpadc *)dd->gpadc,
+ ACC_DETECT2);
+ dv = abs(v2 - v1);
+ v1 = v2;
+ } while (iterations > 0 && dv > MAX_VOLT_DIFF);
+
+ return v1;
+}
+
+/*
+ * not implemented for non-SoC setups
+ */
+static int ab8500_meas_alt_voltage_stable(struct abx500_ad *dd)
+{
+ return -1;
+}
+
+#endif
+
+/*
+ * configures HW so that it is possible to make decision whether
+ * accessory is connected or not.
+ */
+static void ab8500_config_hw_test_plug_connected(struct abx500_ad *dd,
+ int enable)
+{
+ int ret;
+
+ dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable);
+
+ ret = ab8500_config_pulldown(&dd->pdev->dev,
+ dd->pdata->video_ctrl_gpio, !enable);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+ return;
+ }
+
+ if (enable)
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+}
+
+/*
+ * configures HW so that carkit/headset detection can be accomplished.
+ */
+static void ab8500_config_hw_test_basic_carkit(struct abx500_ad *dd, int enable)
+{
+ int ret;
+
+ dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable);
+
+ if (enable)
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1);
+
+ /* Un-Ground the VAMic1 output when enabled */
+ ret = abx500_mask_and_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_REGU_CTRL1,
+ AB8500_REGU_CTRL1_SPARE_REG,
+ BIT_REGUCTRL1SPARE_VAMIC1_GROUND,
+ enable ? BIT_REGUCTRL1SPARE_VAMIC1_GROUND : 0);
+ if (ret < 0)
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to update reg (%d).\n", __func__, ret);
+}
+
+/*
+ * sets the av switch direction - audio-in vs video-out
+ */
+static void ab8500_set_av_switch(struct abx500_ad *dd,
+ enum accessory_avcontrol_dir dir)
+{
+ int ret;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (%d)\n", __func__, dir);
+ if (dir == NOT_SET) {
+ ret = gpio_direction_input(dd->pdata->video_ctrl_gpio);
+ dd->gpio35_dir_set = 0;
+ ret = gpio_direction_output(dd->pdata->video_ctrl_gpio, 0);
+ if (dd->pdata->mic_ctrl)
+ gpio_direction_output(dd->pdata->mic_ctrl, 0);
+ } else if (!dd->gpio35_dir_set) {
+ ret = gpio_direction_output(dd->pdata->video_ctrl_gpio,
+ dir == AUDIO_IN ? 1 : 0);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: video_ctrl pin output config failed (%d).\n",
+ __func__, ret);
+ return;
+ }
+
+ if (dd->pdata->mic_ctrl) {
+ ret = gpio_direction_output(dd->pdata->mic_ctrl,
+ dir == AUDIO_IN ? 1 : 0);
+ if (ret < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: mic_ctrl pin output"
+ "config failed (%d).\n",
+ __func__, ret);
+ return;
+ }
+ }
+
+ dd->gpio35_dir_set = 1;
+ dev_dbg(&dd->pdev->dev, "AV-SWITCH: %s\n",
+ dir == AUDIO_IN ? "AUDIO_IN" : "VIDEO_OUT");
+ } else {
+ gpio_set_value(dd->pdata->video_ctrl_gpio,
+ dir == AUDIO_IN ? 1 : 0);
+ }
+}
+
+static u8 acc_det_ctrl_suspend_val;
+
+static void ab8500_turn_off_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn off AccDetect comparators and pull-up */
+ (void) abx500_get_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ &acc_det_ctrl_suspend_val);
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ 0);
+
+}
+
+static void ab8500_turn_on_accdet_comparator(struct platform_device *pdev)
+{
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+
+ /* Turn on AccDetect comparators and pull-up */
+ (void) abx500_set_register_interruptible(
+ &dd->pdev->dev,
+ AB8500_ECI_AV_ACC,
+ AB8500_ACC_DET_CTRL_REG,
+ acc_det_ctrl_suspend_val);
+
+}
+
+static void *ab8500_accdet_abx500_gpadc_get(void)
+{
+ return ab8500_gpadc_get("ab8500-gpadc.0");
+}
+
+struct abx500_accdet_platform_data *
+ ab8500_get_platform_data(struct platform_device *pdev)
+{
+ struct ab8500_platform_data *plat;
+
+ plat = dev_get_platdata(pdev->dev.parent);
+
+ if (!plat || !plat->accdet) {
+ dev_err(&pdev->dev, "%s: Failed to get accdet plat data.\n",
+ __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return plat->accdet;
+}
+
+struct abx500_ad ab8500_accessory_det_callbacks = {
+ .irq_desc_norm = ab8500_irq_desc_norm,
+ .irq_desc_inverted = ab8500_irq_desc_inverted,
+ .no_irqs = ARRAY_SIZE(ab8500_irq_desc_norm),
+ .regu_desc = ab8500_regu_desc,
+ .no_of_regu_desc = ARRAY_SIZE(ab8500_regu_desc),
+ .config_accdetect2_hw = ab8500_config_accdetect2_hw,
+ .config_accdetect1_hw = ab8500_config_accdetect1_hw,
+ .detect_plugged_in = ab8500_detect_plugged_in,
+ .meas_voltage_stable = ab8500_meas_voltage_stable,
+ .meas_alt_voltage_stable = ab8500_meas_alt_voltage_stable,
+ .config_hw_test_basic_carkit = ab8500_config_hw_test_basic_carkit,
+ .turn_off_accdet_comparator = ab8500_turn_off_accdet_comparator,
+ .turn_on_accdet_comparator = ab8500_turn_on_accdet_comparator,
+ .accdet_abx500_gpadc_get = ab8500_accdet_abx500_gpadc_get,
+ .config_hw_test_plug_connected = ab8500_config_hw_test_plug_connected,
+ .set_av_switch = ab8500_set_av_switch,
+ .get_platform_data = ab8500_get_platform_data,
+};
+
+MODULE_DESCRIPTION("AB8500 AV Accessory detection driver");
+MODULE_ALIAS("platform:ab8500-acc-det");
+MODULE_AUTHOR("ST-Ericsson");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 350fd0c385d..c3c3c51d302 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -6,7 +6,6 @@
*
* AB8500 Power-On Key handler
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -14,128 +13,208 @@
#include <linux/interrupt.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/slab.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+/* Ponkey time control bits */
+#define AB5500_MCB 0x2F
+#define AB5500_PONKEY_10SEC 0x0
+#define AB5500_PONKEY_5SEC 0x1
+#define AB5500_PONKEY_DISABLE 0x2
+#define AB5500_PONKEY_TMR_MASK 0x1
+#define AB5500_PONKEY_TR_MASK 0x2
+
+static int ab5500_ponkey_hw_init(struct platform_device *);
+
+struct ab8500_ponkey_variant {
+ const char *irq_falling;
+ const char *irq_rising;
+ int (*hw_init)(struct platform_device *);
+};
+
+static const struct ab8500_ponkey_variant ab5500_onswa = {
+ .irq_falling = "ONSWAn_falling",
+ .irq_rising = "ONSWAn_rising",
+ .hw_init = ab5500_ponkey_hw_init,
+};
+
+static const struct ab8500_ponkey_variant ab8500_ponkey = {
+ .irq_falling = "ONKEY_DBF",
+ .irq_rising = "ONKEY_DBR",
+};
/**
- * struct ab8500_ponkey - ab8500 ponkey information
+ * struct ab8500_ponkey_info - ab8500 ponkey information
 * @idev: pointer to input device
- * @ab8500: ab8500 parent
* @irq_dbf: irq number for falling transition
* @irq_dbr: irq number for rising transition
*/
-struct ab8500_ponkey {
+struct ab8500_ponkey_info {
struct input_dev *idev;
- struct ab8500 *ab8500;
int irq_dbf;
int irq_dbr;
};
+static int ab5500_ponkey_hw_init(struct platform_device *pdev)
+{
+ u8 val;
+ struct ab5500_ponkey_platform_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata) {
+ switch (pdata->shutdown_secs) {
+ case 0:
+ val = AB5500_PONKEY_DISABLE;
+ break;
+ case 5:
+ val = AB5500_PONKEY_5SEC;
+ break;
+ case 10:
+ val = AB5500_PONKEY_10SEC;
+ break;
+ default:
+ val = AB5500_PONKEY_10SEC;
+ }
+ } else {
+ val = AB5500_PONKEY_10SEC;
+ }
+ return abx500_mask_and_set(
+ &pdev->dev,
+ AB5500_BANK_STARTUP,
+ AB5500_MCB,
+ AB5500_PONKEY_TMR_MASK | AB5500_PONKEY_TR_MASK,
+ val);
+}
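abx500_mask_and_set() above updates only the masked bits of the startup-bank register. Its effect is the classic read-modify-write, spelled out below as a sketch; reg_read()/reg_write() are hypothetical stand-ins, not the real abx500 accessors:

/* Sketch of a masked register update (read-modify-write). */
static int mask_and_set_sketch(u8 bank, u8 addr, u8 mask, u8 val)
{
	u8 cur;
	int ret;

	ret = reg_read(bank, addr, &cur);	/* hypothetical accessor */
	if (ret)
		return ret;
	cur = (cur & ~mask) | (val & mask);	/* touch only the masked bits */
	return reg_write(bank, addr, cur);	/* hypothetical accessor */
}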
+
/* AB8500 gives us an interrupt when ONKEY is held */
static irqreturn_t ab8500_ponkey_handler(int irq, void *data)
{
- struct ab8500_ponkey *ponkey = data;
+ struct ab8500_ponkey_info *info = data;
- if (irq == ponkey->irq_dbf)
- input_report_key(ponkey->idev, KEY_POWER, true);
- else if (irq == ponkey->irq_dbr)
- input_report_key(ponkey->idev, KEY_POWER, false);
+ if (irq == info->irq_dbf)
+ input_report_key(info->idev, KEY_POWER, true);
+ else if (irq == info->irq_dbr)
+ input_report_key(info->idev, KEY_POWER, false);
- input_sync(ponkey->idev);
+ input_sync(info->idev);
return IRQ_HANDLED;
}
static int __devinit ab8500_ponkey_probe(struct platform_device *pdev)
{
- struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_ponkey *ponkey;
- struct input_dev *input;
- int irq_dbf, irq_dbr;
- int error;
+ const struct ab8500_ponkey_variant *variant;
+ struct ab8500_ponkey_info *info;
+ int irq_dbf, irq_dbr, ret;
+
+ variant = (const struct ab8500_ponkey_variant *)
+ pdev->id_entry->driver_data;
+
+ if (variant->hw_init) {
+ ret = variant->hw_init(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to init hw");
+ return ret;
+ }
+ }
- irq_dbf = platform_get_irq_byname(pdev, "ONKEY_DBF");
+ irq_dbf = platform_get_irq_byname(pdev, variant->irq_falling);
if (irq_dbf < 0) {
- dev_err(&pdev->dev, "No IRQ for ONKEY_DBF, error=%d\n", irq_dbf);
+ dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+ variant->irq_falling, irq_dbf);
return irq_dbf;
}
- irq_dbr = platform_get_irq_byname(pdev, "ONKEY_DBR");
+ irq_dbr = platform_get_irq_byname(pdev, variant->irq_rising);
if (irq_dbr < 0) {
- dev_err(&pdev->dev, "No IRQ for ONKEY_DBR, error=%d\n", irq_dbr);
+ dev_err(&pdev->dev, "No IRQ for %s: %d\n",
+ variant->irq_rising, irq_dbr);
return irq_dbr;
}
- ponkey = kzalloc(sizeof(struct ab8500_ponkey), GFP_KERNEL);
- input = input_allocate_device();
- if (!ponkey || !input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ info = kzalloc(sizeof(struct ab8500_ponkey_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- ponkey->idev = input;
- ponkey->ab8500 = ab8500;
- ponkey->irq_dbf = irq_dbf;
- ponkey->irq_dbr = irq_dbr;
+ info->irq_dbf = irq_dbf;
+ info->irq_dbr = irq_dbr;
- input->name = "AB8500 POn(PowerOn) Key";
- input->dev.parent = &pdev->dev;
+ info->idev = input_allocate_device();
+ if (!info->idev) {
+ dev_err(&pdev->dev, "Failed to allocate input dev\n");
+ ret = -ENOMEM;
+ goto out;
+ }
- input_set_capability(input, EV_KEY, KEY_POWER);
+ info->idev->name = "AB8500 POn(PowerOn) Key";
+ info->idev->dev.parent = &pdev->dev;
+ info->idev->evbit[0] = BIT_MASK(EV_KEY);
+ info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
- error = request_any_context_irq(ponkey->irq_dbf, ab8500_ponkey_handler,
- 0, "ab8500-ponkey-dbf", ponkey);
- if (error < 0) {
- dev_err(ab8500->dev, "Failed to request dbf IRQ#%d: %d\n",
- ponkey->irq_dbf, error);
- goto err_free_mem;
+ ret = input_register_device(info->idev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
+ goto out_unfreedevice;
}
- error = request_any_context_irq(ponkey->irq_dbr, ab8500_ponkey_handler,
- 0, "ab8500-ponkey-dbr", ponkey);
- if (error < 0) {
- dev_err(ab8500->dev, "Failed to request dbr IRQ#%d: %d\n",
- ponkey->irq_dbr, error);
- goto err_free_dbf_irq;
+ ret = request_threaded_irq(info->irq_dbf, NULL, ab8500_ponkey_handler,
+ IRQF_NO_SUSPEND, "ab8500-ponkey-dbf",
+ info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
+ info->irq_dbf, ret);
+ goto out_unregisterdevice;
}
- error = input_register_device(ponkey->idev);
- if (error) {
- dev_err(ab8500->dev, "Can't register input device: %d\n", error);
- goto err_free_dbr_irq;
+ ret = request_threaded_irq(info->irq_dbr, NULL, ab8500_ponkey_handler,
+ IRQF_NO_SUSPEND, "ab8500-ponkey-dbr",
+ info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
+ info->irq_dbr, ret);
+ goto out_irq_dbf;
}
- platform_set_drvdata(pdev, ponkey);
- return 0;
+ platform_set_drvdata(pdev, info);
-err_free_dbr_irq:
- free_irq(ponkey->irq_dbr, ponkey);
-err_free_dbf_irq:
- free_irq(ponkey->irq_dbf, ponkey);
-err_free_mem:
- input_free_device(input);
- kfree(ponkey);
+ return 0;
- return error;
+out_irq_dbf:
+ free_irq(info->irq_dbf, info);
+out_unregisterdevice:
+ input_unregister_device(info->idev);
+ info->idev = NULL;
+out_unfreedevice:
+ input_free_device(info->idev);
+out:
+ kfree(info);
+ return ret;
}
static int __devexit ab8500_ponkey_remove(struct platform_device *pdev)
{
- struct ab8500_ponkey *ponkey = platform_get_drvdata(pdev);
-
- free_irq(ponkey->irq_dbf, ponkey);
- free_irq(ponkey->irq_dbr, ponkey);
- input_unregister_device(ponkey->idev);
- kfree(ponkey);
-
- platform_set_drvdata(pdev, NULL);
+ struct ab8500_ponkey_info *info = platform_get_drvdata(pdev);
+ free_irq(info->irq_dbf, info);
+ free_irq(info->irq_dbr, info);
+ input_unregister_device(info->idev);
+ kfree(info);
return 0;
}
+static struct platform_device_id ab8500_ponkey_id_table[] = {
+ { "ab5500-onswa", (kernel_ulong_t)&ab5500_onswa, },
+ { "ab8500-poweron-key", (kernel_ulong_t)&ab8500_ponkey, },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, ab8500_ponkey_id_table);
+
static struct platform_driver ab8500_ponkey_driver = {
.driver = {
.name = "ab8500-poweron-key",
.owner = THIS_MODULE,
},
+ .id_table = ab8500_ponkey_id_table,
.probe = ab8500_ponkey_probe,
.remove = __devexit_p(ab8500_ponkey_remove),
};
diff --git a/drivers/input/misc/abx500-accdet.c b/drivers/input/misc/abx500-accdet.c
new file mode 100644
index 00000000000..4b5017a6e60
--- /dev/null
+++ b/drivers/input/misc/abx500-accdet.c
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GPL V2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/input/abx500-accdet.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/mfd/abx500.h>
+
+#include <sound/jack.h>
+#include <sound/soc.h>
+
+#ifdef CONFIG_SND_SOC_UX500_AB8500
+#include <sound/ux500_ab8500.h>
+#else
+#define ux500_ab8500_jack_report(i)
+#endif
+
+/* Unique value used to identify Headset button input device */
+#define BTN_INPUT_UNIQUE_VALUE "AB8500HsBtn"
+#define BTN_INPUT_DEV_NAME "AB8500 Hs Button"
+
+#define DEBOUNCE_PLUG_EVENT_MS 100
+#define DEBOUNCE_PLUG_RETEST_MS 25
+#define DEBOUNCE_UNPLUG_EVENT_MS 0
+
+/* Delay after the driver is loaded before the first detection pass */
+#define INIT_DELAY_MS 3000
+
+/* Voltage limits (mV) for various types of AV Accessories */
+#define ACCESSORY_DET_VOL_DONTCARE -1
+#define ACCESSORY_HEADPHONE_DET_VOL_MIN 0
+#define ACCESSORY_HEADPHONE_DET_VOL_MAX 40
+#define ACCESSORY_U_HEADSET_DET_VOL_MIN 47
+#define ACCESSORY_U_HEADSET_DET_VOL_MAX 732
+#define ACCESSORY_U_HEADSET_ALT_DET_VOL_MIN 25
+#define ACCESSORY_U_HEADSET_ALT_DET_VOL_MAX 50
+#define ACCESSORY_CARKIT_DET_VOL_MIN 1100
+#define ACCESSORY_CARKIT_DET_VOL_MAX 1300
+#define ACCESSORY_HEADSET_DET_VOL_MIN 1301
+#define ACCESSORY_HEADSET_DET_VOL_MAX 2000
+#define ACCESSORY_OPENCABLE_DET_VOL_MIN 2001
+#define ACCESSORY_OPENCABLE_DET_VOL_MAX 2150
+
+/* Macros */
+
+/*
+ * Convenience macros to check jack characteristics.
+ */
+#define jack_supports_mic(type) \
+ (type == JACK_TYPE_HEADSET || type == JACK_TYPE_CARKIT)
+#define jack_supports_spkr(type) \
+ ((type != JACK_TYPE_DISCONNECTED) && (type != JACK_TYPE_CONNECTED))
+#define jack_supports_buttons(type) \
+ ((type == JACK_TYPE_HEADSET) ||\
+ (type == JACK_TYPE_CARKIT) ||\
+ (type == JACK_TYPE_OPENCABLE) ||\
+ (type == JACK_TYPE_CONNECTED))
+
+/* Forward declarations */
+static void config_accdetect(struct abx500_ad *dd);
+static enum accessory_jack_type detect(struct abx500_ad *dd, int *required_det);
+
+/* Static data initialization */
+static struct accessory_detect_task detect_ops[] = {
+ {
+ .type = JACK_TYPE_DISCONNECTED,
+ .typename = "DISCONNECTED",
+ .meas_mv = 1,
+ .req_det_count = 1,
+ .minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .maxvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_HEADPHONE,
+ .typename = "HEADPHONE",
+ .meas_mv = 1,
+ .req_det_count = 1,
+ .minvol = ACCESSORY_HEADPHONE_DET_VOL_MIN,
+ .maxvol = ACCESSORY_HEADPHONE_DET_VOL_MAX,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_UNSUPPORTED_HEADSET,
+ .typename = "UNSUPPORTED HEADSET",
+ .meas_mv = 1,
+ .req_det_count = 2,
+ .minvol = ACCESSORY_U_HEADSET_DET_VOL_MIN,
+ .maxvol = ACCESSORY_U_HEADSET_DET_VOL_MAX,
+ .alt_minvol = ACCESSORY_U_HEADSET_ALT_DET_VOL_MIN,
+ .alt_maxvol = ACCESSORY_U_HEADSET_ALT_DET_VOL_MAX
+ },
+ {
+ .type = JACK_TYPE_OPENCABLE,
+ .typename = "OPENCABLE",
+ .meas_mv = 0,
+ .req_det_count = 4,
+ .minvol = ACCESSORY_OPENCABLE_DET_VOL_MIN,
+ .maxvol = ACCESSORY_OPENCABLE_DET_VOL_MAX,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_CARKIT,
+ .typename = "CARKIT",
+ .meas_mv = 1,
+ .req_det_count = 1,
+ .minvol = ACCESSORY_CARKIT_DET_VOL_MIN,
+ .maxvol = ACCESSORY_CARKIT_DET_VOL_MAX,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_HEADSET,
+ .typename = "HEADSET",
+ .meas_mv = 0,
+ .req_det_count = 2,
+ .minvol = ACCESSORY_HEADSET_DET_VOL_MIN,
+ .maxvol = ACCESSORY_HEADSET_DET_VOL_MAX,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ },
+ {
+ .type = JACK_TYPE_CONNECTED,
+ .typename = "CONNECTED",
+ .meas_mv = 0,
+ .req_det_count = 4,
+ .minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .maxvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_minvol = ACCESSORY_DET_VOL_DONTCARE,
+ .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE
+ }
+};
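Detection walks this table in order and accepts the first entry whose voltage window matches (repeatedly, per req_det_count). The window-matching step can be illustrated in isolation; a runnable userspace sketch using the voltage limits defined above:

#include <stdio.h>

/* Illustration only: classify a measured voltage against the windows. */
struct det_window { const char *name; int minvol, maxvol; };

static const struct det_window windows[] = {
	{ "HEADPHONE", 0,    40   },
	{ "CARKIT",    1100, 1300 },
	{ "HEADSET",   1301, 2000 },
	{ "OPENCABLE", 2001, 2150 },
};

static const char *classify(int mv)
{
	size_t i;

	for (i = 0; i < sizeof(windows) / sizeof(windows[0]); i++)
		if (mv >= windows[i].minvol && mv <= windows[i].maxvol)
			return windows[i].name;
	return "UNKNOWN";
}

int main(void)
{
	printf("1500 mV -> %s\n", classify(1500));	/* HEADSET */
	printf("20 mV -> %s\n", classify(20));		/* HEADPHONE */
	return 0;
}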
+
+static struct accessory_irq_descriptor *abx500_accdet_irq_desc;
+
+/*
+ * textual representation of the accessory type
+ */
+static const char *accessory_str(enum accessory_jack_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(detect_ops); i++)
+ if (type == detect_ops[i].type)
+ return detect_ops[i].typename;
+
+ return "UNKNOWN?";
+}
+
+/*
+ * enables a regulator, but only if it has not been enabled earlier.
+ */
+void accessory_regulator_enable(struct abx500_ad *dd,
+ enum accessory_regulator reg)
+{
+ int i;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ if (reg & dd->regu_desc[i].id) {
+ if (!dd->regu_desc[i].enabled) {
+ if (!regulator_enable(dd->regu_desc[i].handle))
+ dd->regu_desc[i].enabled = 1;
+ }
+ }
+ }
+}
+
+/*
+ * disables a regulator, but only if it was previously enabled.
+ */
+void accessory_regulator_disable(struct abx500_ad *dd,
+ enum accessory_regulator reg)
+{
+ int i;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ if (reg & dd->regu_desc[i].id) {
+ if (dd->regu_desc[i].enabled) {
+ if (!regulator_disable(dd->regu_desc[i].handle))
+ dd->regu_desc[i].enabled = 0;
+ }
+ }
+ }
+}
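The enabled flag keeps regulator_enable()/regulator_disable() calls balanced even when the helpers are invoked repeatedly, which matters because the regulator core reference-counts. A minimal sketch of the guard around a single handle (names are illustrative):

/* Sketch: idempotent enable/disable guarding the regulator refcount. */
struct guarded_regu {
	struct regulator *handle;
	bool enabled;
};

static void guarded_enable(struct guarded_regu *r)
{
	if (!r->enabled && !regulator_enable(r->handle))
		r->enabled = true;
}

static void guarded_disable(struct guarded_regu *r)
{
	if (r->enabled && !regulator_disable(r->handle))
		r->enabled = false;
}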
+
+/*
+ * frees previously retrieved regulators.
+ */
+static void free_regulators(struct abx500_ad *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ if (dd->regu_desc[i].handle) {
+ regulator_put(dd->regu_desc[i].handle);
+ dd->regu_desc[i].handle = NULL;
+ }
+ }
+}
+
+/*
+ * gets required regulators.
+ */
+static int create_regulators(struct abx500_ad *dd)
+{
+ int i;
+ int status = 0;
+
+ for (i = 0; i < dd->no_of_regu_desc; i++) {
+ struct regulator *regu =
+ regulator_get(&dd->pdev->dev, dd->regu_desc[i].name);
+ if (IS_ERR(regu)) {
+ status = PTR_ERR(regu);
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to get supply '%s' (%d).\n",
+ __func__, dd->regu_desc[i].name, status);
+ free_regulators(dd);
+ goto out;
+ } else {
+ dd->regu_desc[i].handle = regu;
+ }
+ }
+
+out:
+ return status;
+}
+
+/*
+ * create input device for button press reporting
+ */
+static int create_btn_input_dev(struct abx500_ad *dd)
+{
+ int err;
+
+ dd->btn_input_dev = input_allocate_device();
+ if (!dd->btn_input_dev) {
+ dev_err(&dd->pdev->dev, "%s: Failed to allocate input dev.\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ input_set_capability(dd->btn_input_dev,
+ EV_KEY,
+ dd->pdata->btn_keycode);
+
+ dd->btn_input_dev->name = BTN_INPUT_DEV_NAME;
+ dd->btn_input_dev->uniq = BTN_INPUT_UNIQUE_VALUE;
+ dd->btn_input_dev->dev.parent = &dd->pdev->dev;
+
+ err = input_register_device(dd->btn_input_dev);
+ if (err) {
+ dev_err(&dd->pdev->dev,
+ "%s: register_input_device failed (%d).\n", __func__,
+ err);
+ input_free_device(dd->btn_input_dev);
+ dd->btn_input_dev = NULL;
+ goto out;
+ }
+out:
+ return err;
+}
+
+/*
+ * reports jack status
+ */
+void report_jack_status(struct abx500_ad *dd)
+{
+ int value = 0;
+
+ /* Never report possible open cable */
+ if (dd->jack_type == JACK_TYPE_OPENCABLE)
+ goto out;
+
+ /* Never report same state twice in a row */
+ if (dd->jack_type == dd->reported_jack_type)
+ goto out;
+ dd->reported_jack_type = dd->jack_type;
+
+ dev_dbg(&dd->pdev->dev, "Accessory: %s\n",
+ accessory_str(dd->jack_type));
+
+ /* Never report unsupported headset */
+ if (dd->jack_type == JACK_TYPE_UNSUPPORTED_HEADSET)
+ goto out;
+
+ if (dd->jack_type != JACK_TYPE_DISCONNECTED &&
+ dd->jack_type != JACK_TYPE_UNSPECIFIED)
+ value |= SND_JACK_MECHANICAL;
+ if (jack_supports_mic(dd->jack_type))
+ value |= SND_JACK_MICROPHONE;
+ if (jack_supports_spkr(dd->jack_type))
+ value |= (SND_JACK_HEADPHONE | SND_JACK_LINEOUT);
+ ux500_ab8500_jack_report(value);
+
+out:
+ return;
+}
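The reported value is an SND_JACK_* bitmask accumulated from the jack's capabilities. As an illustration only, a 4-pole headset would end up reporting:

/* Illustration: bits accumulated above for JACK_TYPE_HEADSET. */
int value = SND_JACK_MECHANICAL		/* physically inserted */
	| SND_JACK_MICROPHONE		/* jack_supports_mic() */
	| SND_JACK_HEADPHONE | SND_JACK_LINEOUT; /* jack_supports_spkr() */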
+
+/*
+ * worker routine to handle accessory unplug case
+ */
+void unplug_irq_handler_work(struct work_struct *work)
+{
+ struct abx500_ad *dd = container_of(work,
+ struct abx500_ad, unplug_irq_work.work);
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ dd->jack_type = dd->jack_type_temp = JACK_TYPE_DISCONNECTED;
+ dd->jack_det_count = dd->total_jack_det_count = 0;
+ dd->btn_state = BUTTON_UNK;
+ config_accdetect(dd);
+
+ accessory_regulator_disable(dd, REGULATOR_ALL);
+
+ report_jack_status(dd);
+}
+
+/*
+ * interrupt service routine for accessory unplug.
+ */
+irqreturn_t unplug_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq);
+
+ queue_delayed_work(dd->irq_work_queue, &dd->unplug_irq_work,
+ msecs_to_jiffies(DEBOUNCE_UNPLUG_EVENT_MS));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * interrupt service routine for accessory plug.
+ */
+irqreturn_t plug_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n",
+ __func__, irq);
+
+ switch (dd->jack_type) {
+ case JACK_TYPE_DISCONNECTED:
+ case JACK_TYPE_UNSPECIFIED:
+ queue_delayed_work(dd->irq_work_queue, &dd->detect_work,
+ msecs_to_jiffies(DEBOUNCE_PLUG_EVENT_MS));
+ break;
+
+ default:
+ dev_err(&dd->pdev->dev, "%s: Unexpected plug IRQ\n", __func__);
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * worker routine to perform detection.
+ */
+static void detect_work(struct work_struct *work)
+{
+ int req_det_count = 1;
+ enum accessory_jack_type new_type;
+ struct abx500_ad *dd = container_of(work,
+ struct abx500_ad, detect_work.work);
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, AUDIO_IN);
+
+ new_type = detect(dd, &req_det_count);
+
+ dd->total_jack_det_count++;
+ if (dd->jack_type_temp == new_type) {
+ dd->jack_det_count++;
+ } else {
+ dd->jack_det_count = 1;
+ dd->jack_type_temp = new_type;
+ }
+
+ if (dd->total_jack_det_count >= MAX_DET_COUNT) {
+ dev_err(&dd->pdev->dev,
+ "%s: MAX_DET_COUNT(=%d) reached. Bailing out.\n",
+ __func__, MAX_DET_COUNT);
+ queue_delayed_work(dd->irq_work_queue, &dd->unplug_irq_work,
+ msecs_to_jiffies(DEBOUNCE_UNPLUG_EVENT_MS));
+ } else if (dd->jack_det_count >= req_det_count) {
+ dd->total_jack_det_count = dd->jack_det_count = 0;
+ dd->jack_type = new_type;
+ dd->detect_jiffies = jiffies;
+ report_jack_status(dd);
+ config_accdetect(dd);
+ } else {
+ queue_delayed_work(dd->irq_work_queue,
+ &dd->detect_work,
+ msecs_to_jiffies(DEBOUNCE_PLUG_RETEST_MS));
+ }
+}
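A type is committed only after req_det_count consecutive identical detections, and the whole attempt is abandoned once MAX_DET_COUNT passes have run. The consecutive-match core reduces to the following standalone sketch (field names and limits are hypothetical):

#include <stdbool.h>

/* Sketch: accept a reading only after it repeats often enough. */
struct debounce {
	int last;	/* last value seen */
	int count;	/* consecutive matches */
	int required;	/* matches needed to commit */
};

static bool debounce_feed(struct debounce *d, int val)
{
	if (val == d->last) {
		d->count++;
	} else {
		d->last = val;
		d->count = 1;
	}
	return d->count >= d->required;	/* true: value is stable */
}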
+
+/*
+ * reports a button event (pressed, released).
+ */
+static void report_btn_event(struct abx500_ad *dd, int down)
+{
+ input_report_key(dd->btn_input_dev, dd->pdata->btn_keycode, down);
+ input_sync(dd->btn_input_dev);
+
+ dev_dbg(&dd->pdev->dev, "HS-BTN: %s\n", down ? "PRESSED" : "RELEASED");
+}
+
+/*
+ * interrupt service routine invoked when hs button is pressed down.
+ */
+irqreturn_t button_press_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ unsigned long accept_jiffies = dd->detect_jiffies +
+ msecs_to_jiffies(1000);
+ if (time_before(jiffies, accept_jiffies)) {
+ dev_dbg(&dd->pdev->dev, "%s: Skipped spurious btn press.\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq);
+
+ if (dd->jack_type == JACK_TYPE_OPENCABLE) {
+ /* Something got connected to the open cable -> detect. */
+ dd->config_accdetect2_hw(dd, 0);
+ queue_delayed_work(dd->irq_work_queue, &dd->detect_work,
+ msecs_to_jiffies(DEBOUNCE_PLUG_EVENT_MS));
+ return IRQ_HANDLED;
+ }
+
+ if (dd->btn_state == BUTTON_PRESSED)
+ return IRQ_HANDLED;
+
+ if (jack_supports_buttons(dd->jack_type)) {
+ dd->btn_state = BUTTON_PRESSED;
+ report_btn_event(dd, 1);
+ } else {
+ dd->btn_state = BUTTON_UNK;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * interrupts service routine invoked when hs button is released.
+ */
+irqreturn_t button_release_irq_handler(int irq, void *_userdata)
+{
+ struct abx500_ad *dd = _userdata;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq);
+
+ if (dd->jack_type == JACK_TYPE_OPENCABLE)
+ return IRQ_HANDLED;
+
+ if (dd->btn_state != BUTTON_PRESSED)
+ return IRQ_HANDLED;
+
+ if (jack_supports_buttons(dd->jack_type)) {
+ report_btn_event(dd, 0);
+ dd->btn_state = BUTTON_RELEASED;
+ } else {
+ dd->btn_state = BUTTON_UNK;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Checks whether the measured voltage is in the given range. Depending on
+ * the arguments, the voltage is either re-measured or the previously
+ * measured value is reused.
+ */
+static int mic_vol_in_range(struct abx500_ad *dd,
+ int lo, int hi, int alt_lo, int alt_hi, int force_read)
+{
+ static int mv = MIN_MIC_POWER;
+ static int alt_mv = MIN_MIC_POWER;
+
+ if (mv == MIN_MIC_POWER || force_read)
+ mv = dd->meas_voltage_stable(dd);
+
+ if (mv < lo || mv > hi)
+ return 0;
+
+ if (ACCESSORY_DET_VOL_DONTCARE == alt_lo &&
+ ACCESSORY_DET_VOL_DONTCARE == alt_hi)
+ return 1;
+
+ if (alt_mv == MIN_MIC_POWER || force_read)
+ alt_mv = dd->meas_alt_voltage_stable(dd);
+
+ if (alt_mv < alt_lo || alt_mv > alt_hi)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * checks whether the currently connected HW is of given type.
+ */
+static int detect_hw(struct abx500_ad *dd,
+ struct accessory_detect_task *task)
+{
+ int status;
+
+ switch (task->type) {
+ case JACK_TYPE_DISCONNECTED:
+ dd->config_hw_test_plug_connected(dd, 1);
+ status = !dd->detect_plugged_in(dd);
+ break;
+ case JACK_TYPE_CONNECTED:
+ dd->config_hw_test_plug_connected(dd, 1);
+ status = dd->detect_plugged_in(dd);
+ break;
+ case JACK_TYPE_CARKIT:
+ case JACK_TYPE_HEADPHONE:
+ case JACK_TYPE_HEADSET:
+ case JACK_TYPE_UNSUPPORTED_HEADSET:
+ case JACK_TYPE_OPENCABLE:
+ status = mic_vol_in_range(dd,
+ task->minvol,
+ task->maxvol,
+ task->alt_minvol,
+ task->alt_maxvol,
+ task->meas_mv);
+ break;
+ default:
+ status = 0;
+ }
+
+ return status;
+}
+
+/*
+ * Tries to detect the currently attached accessory
+ */
+static enum accessory_jack_type detect(struct abx500_ad *dd,
+ int *req_det_count)
+{
+ enum accessory_jack_type type = JACK_TYPE_DISCONNECTED;
+ int i;
+
+ accessory_regulator_enable(dd, REGULATOR_VAUDIO | REGULATOR_AVSWITCH);
+ /* enable the VAMIC1 regulator */
+ dd->config_hw_test_basic_carkit(dd, 0);
+
+ for (i = 0; i < ARRAY_SIZE(detect_ops); ++i) {
+ if (detect_hw(dd, &detect_ops[i])) {
+ type = detect_ops[i].type;
+ *req_det_count = detect_ops[i].req_det_count;
+ break;
+ }
+ }
+
+ dd->config_hw_test_plug_connected(dd, 0);
+
+ if (jack_supports_buttons(type))
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+ else
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1 |
+ REGULATOR_AVSWITCH);
+
+ accessory_regulator_disable(dd, REGULATOR_VAUDIO);
+
+ return type;
+}
+
+/*
+ * registers to specific interrupt
+ */
+static void claim_irq(struct abx500_ad *dd, enum accessory_irq irq_id)
+{
+ int ret;
+ int irq;
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ if (abx500_accdet_irq_desc[irq_id].registered)
+ return;
+
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+ if (irq < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to get irq %s\n", __func__,
+ abx500_accdet_irq_desc[irq_id].name);
+ return;
+ }
+
+ ret = request_threaded_irq(irq,
+ NULL,
+ abx500_accdet_irq_desc[irq_id].isr,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ abx500_accdet_irq_desc[irq_id].name,
+ dd);
+ if (ret != 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to claim irq %s (%d)\n",
+ __func__,
+ abx500_accdet_irq_desc[irq_id].name,
+ ret);
+ } else {
+ abx500_accdet_irq_desc[irq_id].registered = 1;
+ dev_dbg(&dd->pdev->dev, "%s: %s\n",
+ __func__, abx500_accdet_irq_desc[irq_id].name);
+ }
+}
+
+/*
+ * releases specific interrupt
+ */
+static void release_irq(struct abx500_ad *dd, enum accessory_irq irq_id)
+{
+ int irq;
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ if (!abx500_accdet_irq_desc[irq_id].registered)
+ return;
+
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+ if (irq < 0) {
+ dev_err(&dd->pdev->dev,
+ "%s: Failed to get irq %s (%d)\n",
+ __func__,
+ abx500_accdet_irq_desc[irq_id].name, irq);
+ } else {
+ free_irq(irq, dd);
+ abx500_accdet_irq_desc[irq_id].registered = 0;
+ dev_dbg(&dd->pdev->dev, "%s: %s\n",
+ __func__, abx500_accdet_irq_desc[irq_id].name);
+ }
+}
+
+/*
+ * configures interrupts + detection hardware to meet the requirements
+ * set by currently attached accessory type.
+ */
+static void config_accdetect(struct abx500_ad *dd)
+{
+ switch (dd->jack_type) {
+ case JACK_TYPE_UNSPECIFIED:
+ dd->config_accdetect1_hw(dd, 1);
+ dd->config_accdetect2_hw(dd, 0);
+
+ release_irq(dd, PLUG_IRQ);
+ release_irq(dd, UNPLUG_IRQ);
+ release_irq(dd, BUTTON_PRESS_IRQ);
+ release_irq(dd, BUTTON_RELEASE_IRQ);
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, NOT_SET);
+ break;
+
+ case JACK_TYPE_DISCONNECTED:
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, NOT_SET);
+ case JACK_TYPE_HEADPHONE:
+ dd->config_accdetect1_hw(dd, 1);
+ dd->config_accdetect2_hw(dd, 0);
+
+ claim_irq(dd, PLUG_IRQ);
+ claim_irq(dd, UNPLUG_IRQ);
+ release_irq(dd, BUTTON_PRESS_IRQ);
+ release_irq(dd, BUTTON_RELEASE_IRQ);
+ break;
+
+ case JACK_TYPE_UNSUPPORTED_HEADSET:
+ dd->config_accdetect1_hw(dd, 1);
+ dd->config_accdetect2_hw(dd, 1);
+
+ release_irq(dd, PLUG_IRQ);
+ claim_irq(dd, UNPLUG_IRQ);
+ release_irq(dd, BUTTON_PRESS_IRQ);
+ release_irq(dd, BUTTON_RELEASE_IRQ);
+ if (dd->set_av_switch)
+ dd->set_av_switch(dd, NOT_SET);
+ break;
+
+ case JACK_TYPE_CONNECTED:
+ case JACK_TYPE_HEADSET:
+ case JACK_TYPE_CARKIT:
+ case JACK_TYPE_OPENCABLE:
+ dd->config_accdetect1_hw(dd, 1);
+ dd->config_accdetect2_hw(dd, 1);
+
+ release_irq(dd, PLUG_IRQ);
+ claim_irq(dd, UNPLUG_IRQ);
+ claim_irq(dd, BUTTON_PRESS_IRQ);
+ claim_irq(dd, BUTTON_RELEASE_IRQ);
+ break;
+
+ default:
+ dev_err(&dd->pdev->dev, "%s: Unknown type: %d\n",
+ __func__, dd->jack_type);
+ }
+}
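Each jack state therefore selects a fixed set of claimed interrupts. Read as data, the switch above amounts to the mapping sketched below (illustrative constants only; the driver keeps the explicit switch):

/* Illustration only: which IRQs each state claims, as a bitmask. */
#define WANT_PLUG	(1 << 0)
#define WANT_UNPLUG	(1 << 1)
#define WANT_BTN_PRESS	(1 << 2)
#define WANT_BTN_REL	(1 << 3)

/*
 * UNSPECIFIED                -> none
 * DISCONNECTED, HEADPHONE    -> WANT_PLUG | WANT_UNPLUG
 * UNSUPPORTED_HEADSET        -> WANT_UNPLUG
 * CONNECTED, HEADSET,
 * CARKIT, OPENCABLE          -> WANT_UNPLUG | WANT_BTN_PRESS | WANT_BTN_REL
 */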
+
+/*
+ * Deferred initialization of the work.
+ */
+static void init_work(struct work_struct *work)
+{
+ struct abx500_ad *dd = container_of(work,
+ struct abx500_ad, init_work.work);
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ dd->jack_type = dd->reported_jack_type = JACK_TYPE_UNSPECIFIED;
+ config_accdetect(dd);
+ queue_delayed_work(dd->irq_work_queue,
+ &dd->detect_work,
+ msecs_to_jiffies(0));
+}
+
+/*
+ * performs platform device initialization
+ */
+static int abx500_accessory_init(struct platform_device *pdev)
+{
+ int ret;
+ struct abx500_ad *dd = (struct abx500_ad *)pdev->id_entry->driver_data;
+
+ dev_dbg(&pdev->dev, "Enter: %s\n", __func__);
+
+ dd->pdev = pdev;
+ dd->pdata = dd->get_platform_data(pdev);
+ if (IS_ERR(dd->pdata))
+ return PTR_ERR(dd->pdata);
+
+ if (dd->pdata->video_ctrl_gpio) {
+ if (!gpio_is_valid(dd->pdata->video_ctrl_gpio)) {
+ dev_err(&pdev->dev,
+ "%s: Video ctrl GPIO invalid (%d).\n", __func__,
+ dd->pdata->video_ctrl_gpio);
+
+ return -EINVAL;
+ }
+ ret = gpio_request(dd->pdata->video_ctrl_gpio,
+ "Video Control");
+ if (ret) {
+ dev_err(&pdev->dev, "%s: Get video ctrl GPIO"
+ "failed.\n", __func__);
+ return ret;
+ }
+ }
+
+ if (dd->pdata->mic_ctrl) {
+ if (!gpio_is_valid(dd->pdata->mic_ctrl)) {
+ dev_err(&pdev->dev,
+ "%s: Mic ctrl GPIO invalid (%d).\n", __func__,
+ dd->pdata->mic_ctrl);
+
+ ret = -EINVAL;
+ goto mic_ctrl_fail;
+ }
+ ret = gpio_request(dd->pdata->mic_ctrl,
+ "Mic Control");
+ if (ret) {
+ dev_err(&pdev->dev, "%s: Get mic ctrl GPIO"
+ "failed.\n", __func__);
+ goto mic_ctrl_fail;
+ }
+ }
+
+ ret = create_btn_input_dev(dd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s: create_button_input_dev failed.\n",
+ __func__);
+ goto fail_no_btn_input_dev;
+ }
+
+ ret = create_regulators(dd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s: failed to create regulators\n",
+ __func__);
+ goto fail_no_regulators;
+ }
+ dd->btn_state = BUTTON_UNK;
+
+ dd->irq_work_queue = create_singlethread_workqueue("abx500_accdet_wq");
+ if (!dd->irq_work_queue) {
+ dev_err(&pdev->dev, "%s: Failed to create wq\n", __func__);
+ ret = -ENOMEM;
+ goto fail_no_mem_for_wq;
+ }
+
+ dd->gpadc = dd->accdet_abx500_gpadc_get();
+
+ INIT_DELAYED_WORK(&dd->detect_work, detect_work);
+ INIT_DELAYED_WORK(&dd->unplug_irq_work, unplug_irq_handler_work);
+ INIT_DELAYED_WORK(&dd->init_work, init_work);
+
+ /* Defer init/detection; the information is of no use early in boot */
+ queue_delayed_work(dd->irq_work_queue,
+ &dd->init_work,
+ msecs_to_jiffies(INIT_DELAY_MS));
+
+ platform_set_drvdata(pdev, dd);
+
+ return 0;
+fail_no_mem_for_wq:
+ free_regulators(dd);
+fail_no_regulators:
+ input_unregister_device(dd->btn_input_dev);
+fail_no_btn_input_dev:
+ if (dd->pdata->mic_ctrl)
+ gpio_free(dd->pdata->mic_ctrl);
+mic_ctrl_fail:
+ if (dd->pdata->video_ctrl_gpio)
+ gpio_free(dd->pdata->video_ctrl_gpio);
+ return ret;
+}
+
+/*
+ * Performs platform device cleanup
+ */
+static void abx500_accessory_cleanup(struct abx500_ad *dd)
+{
+ dev_dbg(&dd->pdev->dev, "Enter: %s\n", __func__);
+
+ dd->jack_type = JACK_TYPE_UNSPECIFIED;
+ config_accdetect(dd);
+
+ if (dd->pdata->mic_ctrl)
+ gpio_free(dd->pdata->mic_ctrl);
+
+ if (dd->pdata->video_ctrl_gpio)
+ gpio_free(dd->pdata->video_ctrl_gpio);
+
+ input_unregister_device(dd->btn_input_dev);
+ free_regulators(dd);
+
+ cancel_delayed_work(&dd->detect_work);
+ cancel_delayed_work(&dd->unplug_irq_work);
+ cancel_delayed_work(&dd->init_work);
+ flush_workqueue(dd->irq_work_queue);
+ destroy_workqueue(dd->irq_work_queue);
+}
+
+static int __devinit abx500_acc_detect_probe(struct platform_device *pdev)
+{
+ return abx500_accessory_init(pdev);
+}
+
+static int __devexit abx500_acc_detect_remove(struct platform_device *pdev)
+{
+ abx500_accessory_cleanup(platform_get_drvdata(pdev));
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#if defined(CONFIG_PM)
+static int abx500_acc_detect_suspend(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+ int irq_id, irq;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ cancel_delayed_work_sync(&dd->unplug_irq_work);
+ cancel_delayed_work_sync(&dd->detect_work);
+ cancel_delayed_work_sync(&dd->init_work);
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ for (irq_id = 0; irq_id < dd->no_irqs; irq_id++) {
+ if (abx500_accdet_irq_desc[irq_id].registered == 1) {
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+
+ disable_irq(irq);
+ }
+ }
+
+ dd->turn_off_accdet_comparator(pdev);
+
+ if (dd->jack_type == JACK_TYPE_HEADSET)
+ accessory_regulator_disable(dd, REGULATOR_VAMIC1);
+
+ return 0;
+}
+
+static int abx500_acc_detect_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct abx500_ad *dd = platform_get_drvdata(pdev);
+ int irq_id, irq;
+
+ dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__);
+
+ if (dd->jack_type == JACK_TYPE_HEADSET)
+ accessory_regulator_enable(dd, REGULATOR_VAMIC1);
+
+ dd->turn_on_accdet_comparator(pdev);
+
+ if (dd->pdata->is_detection_inverted)
+ abx500_accdet_irq_desc = dd->irq_desc_inverted;
+ else
+ abx500_accdet_irq_desc = dd->irq_desc_norm;
+
+ for (irq_id = 0; irq_id < dd->no_irqs; irq_id++) {
+ if (abx500_accdet_irq_desc[irq_id].registered == 1) {
+ irq = platform_get_irq_byname(
+ dd->pdev,
+ abx500_accdet_irq_desc[irq_id].name);
+
+ enable_irq(irq);
+
+ }
+ }
+
+ /* After resume, reinitialize */
+ dd->gpio35_dir_set = dd->accdet1_th_set = dd->accdet2_th_set = 0;
+ queue_delayed_work(dd->irq_work_queue, &dd->init_work, 0);
+
+ return 0;
+}
+#else
+#define abx500_acc_detect_suspend NULL
+#define abx500_acc_detect_resume NULL
+#endif
+
+static struct platform_device_id abx500_accdet_ids[] = {
+#ifdef CONFIG_INPUT_AB5500_ACCDET
+ { "ab5500-acc-det", (kernel_ulong_t)&ab5500_accessory_det_callbacks, },
+#endif
+#ifdef CONFIG_INPUT_AB8500_ACCDET
+ { "ab8500-acc-det", (kernel_ulong_t)&ab8500_accessory_det_callbacks, },
+#endif
+ { },
+};
+
+static const struct dev_pm_ops abx_ops = {
+ .suspend = abx500_acc_detect_suspend,
+ .resume = abx500_acc_detect_resume,
+};
+
+static struct platform_driver abx500_acc_detect_platform_driver = {
+ .driver = {
+ .name = "abx500-acc-det",
+ .owner = THIS_MODULE,
+ .pm = &abx_ops,
+ },
+ .probe = abx500_acc_detect_probe,
+ .id_table = abx500_accdet_ids,
+ .remove = __devexit_p(abx500_acc_detect_remove),
+};
+
+static int __init abx500_acc_detect_init(void)
+{
+ return platform_driver_register(&abx500_acc_detect_platform_driver);
+}
+
+static void __exit abx500_acc_detect_exit(void)
+{
+ platform_driver_unregister(&abx500_acc_detect_platform_driver);
+}
+
+module_init(abx500_acc_detect_init);
+module_exit(abx500_acc_detect_exit);
+
+MODULE_DESCRIPTION("ABx500 AV Accessory detection driver");
+MODULE_ALIAS("platform:abx500-acc-det");
+MODULE_AUTHOR("ST-Ericsson");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/ste_ff_vibra.c b/drivers/input/misc/ste_ff_vibra.c
new file mode 100644
index 00000000000..9038e6be046
--- /dev/null
+++ b/drivers/input/misc/ste_ff_vibra.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com>
+ * for ST-Ericsson
+ * License Terms: GNU General Public License v2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <mach/ste_audio_io_vibrator.h>
+
+#define FF_VIBRA_DOWN 0x0000 /* 0 degrees */
+#define FF_VIBRA_LEFT 0x4000 /* 90 degrees */
+#define FF_VIBRA_UP 0x8000 /* 180 degrees */
+#define FF_VIBRA_RIGHT 0xC000 /* 270 degrees */
+
+/**
+ * struct vibra_info - Vibrator information structure
+ * @idev: Pointer to input device structure
+ * @vibra_workqueue: Pointer to vibrator workqueue structure
+ * @vibra_work: Vibrator work
+ * @direction: Vibration direction
+ * @speed: Vibration speed
+ *
+ * Structure vibra_info holds vibrator information.
+ **/
+struct vibra_info {
+ struct input_dev *idev;
+ struct workqueue_struct *vibra_workqueue;
+ struct work_struct vibra_work;
+ int direction;
+ unsigned char speed;
+};
+
+/**
+ * vibra_play_work() - Vibrator work, sets speed and direction
+ * @work: Pointer to work structure
+ *
+ * This function is called from the workqueue and turns the vibrator on/off.
+ **/
+static void vibra_play_work(struct work_struct *work)
+{
+ struct vibra_info *vinfo = container_of(work,
+ struct vibra_info, vibra_work);
+ struct ste_vibra_speed left_speed = {
+ .positive = 0,
+ .negative = 0,
+ };
+ struct ste_vibra_speed right_speed = {
+ .positive = 0,
+ .negative = 0,
+ };
+
+ /* Divide by 2 because the range supported by the PWM is 0-100 */
+ vinfo->speed /= 2;
+
+ if ((vinfo->direction > FF_VIBRA_DOWN) &&
+ (vinfo->direction < FF_VIBRA_UP)) {
+ /* 1 - 179 degrees, turn on left vibrator */
+ left_speed.positive = vinfo->speed;
+ } else if (vinfo->direction > FF_VIBRA_UP) {
+ /* more than 180 degrees, turn on right vibrator */
+ right_speed.positive = vinfo->speed;
+ } else {
+ /* 0 (down) or 180 (up) degrees, turn on 2 vibrators */
+ left_speed.positive = vinfo->speed;
+ right_speed.positive = vinfo->speed;
+ }
+
+ ste_audioio_vibrator_pwm_control(STE_AUDIOIO_CLIENT_FF_VIBRA,
+ left_speed, right_speed);
+}
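The ff_effect direction field is a 16-bit angle where, per the defines at the top of the file, 0x4000 is 90 degrees and 0xC000 is 270. The conversion to degrees is a plain integer scale; a runnable sketch:

#include <stdio.h>

/* 16-bit force-feedback direction (0x0000..0xFFFF) -> degrees (0..359). */
static unsigned int dir_to_degrees(unsigned int dir)
{
	return (dir & 0xFFFFu) * 360u / 0x10000u;
}

int main(void)
{
	printf("0x4000 -> %u deg\n", dir_to_degrees(0x4000));	/* 90  */
	printf("0xC000 -> %u deg\n", dir_to_degrees(0xC000));	/* 270 */
	return 0;
}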
+
+/**
+ * vibra_play() - Memless device control function
+ * @idev: Pointer to input device structure
+ * @data: Pointer to private data (not used)
+ * @effect: Pointer to force feedback effect structure
+ *
+ * This function controls the memless force-feedback device.
+ *
+ * Returns:
+ * 0 - success
+ **/
+static int vibra_play(struct input_dev *idev, void *data,
+ struct ff_effect *effect)
+{
+ struct vibra_info *vinfo = input_get_drvdata(idev);
+
+ vinfo->direction = effect->direction;
+ vinfo->speed = effect->u.rumble.strong_magnitude >> 8;
+ if (!vinfo->speed)
+ /* Shift weak magnitude to make it perceptible on the vibrator */
+ vinfo->speed = effect->u.rumble.weak_magnitude >> 9;
+
+ queue_work(vinfo->vibra_workqueue, &vinfo->vibra_work);
+
+ return 0;
+}
+
+/**
+ * ste_ff_vibra_open() - Input device open function
+ * @idev: Pointer to input device structure
+ *
+ * This function is called when the input device is opened.
+ *
+ * Returns:
+ * -ENOMEM - no memory left
+ * 0 - success
+ **/
+static int ste_ff_vibra_open(struct input_dev *idev)
+{
+ struct vibra_info *vinfo = input_get_drvdata(idev);
+
+ vinfo->vibra_workqueue =
+ create_singlethread_workqueue("ste_ff-ff-vibra");
+ if (!vinfo->vibra_workqueue) {
+ dev_err(&idev->dev, "couldn't create vibra workqueue\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * ste_ff_vibra_close() - Input device close function
+ * @idev: Pointer to input device structure
+ *
+ * This function is called when the input device is closed.
+ **/
+static void ste_ff_vibra_close(struct input_dev *idev)
+{
+ struct vibra_info *vinfo = input_get_drvdata(idev);
+
+ cancel_work_sync(&vinfo->vibra_work);
+ INIT_WORK(&vinfo->vibra_work, vibra_play_work);
+ destroy_workqueue(vinfo->vibra_workqueue);
+ vinfo->vibra_workqueue = NULL;
+}
+
+static int __devinit ste_ff_vibra_probe(struct platform_device *pdev)
+{
+ struct vibra_info *vinfo;
+ int ret;
+
+ vinfo = kmalloc(sizeof *vinfo, GFP_KERNEL);
+ if (!vinfo) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ vinfo->idev = input_allocate_device();
+ if (!vinfo->idev) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ ret = -ENOMEM;
+ goto exit_vinfo_free;
+ }
+
+ vinfo->idev->name = "ste-ff-vibra";
+ vinfo->idev->dev.parent = pdev->dev.parent;
+ vinfo->idev->open = ste_ff_vibra_open;
+ vinfo->idev->close = ste_ff_vibra_close;
+ INIT_WORK(&vinfo->vibra_work, vibra_play_work);
+ __set_bit(FF_RUMBLE, vinfo->idev->ffbit);
+
+ ret = input_ff_create_memless(vinfo->idev, NULL, vibra_play);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create memless device\n");
+ goto exit_idev_free;
+ }
+
+ ret = input_register_device(vinfo->idev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ goto exit_destroy_memless;
+ }
+
+ input_set_drvdata(vinfo->idev, vinfo);
+ platform_set_drvdata(pdev, vinfo);
+ return 0;
+
+exit_destroy_memless:
+ input_ff_destroy(vinfo->idev);
+exit_idev_free:
+ input_free_device(vinfo->idev);
+exit_vinfo_free:
+ kfree(vinfo);
+ return ret;
+}
+
+static int __devexit ste_ff_vibra_remove(struct platform_device *pdev)
+{
+ struct vibra_info *vinfo = platform_get_drvdata(pdev);
+
+ /*
+ * Function device_release() will call input_dev_release()
+ * which will free ff and input device. No need to call
+ * input_ff_destroy() and input_free_device() explicitly.
+ */
+ input_unregister_device(vinfo->idev);
+ kfree(vinfo);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ste_ff_vibra_driver = {
+ .driver = {
+ .name = "ste_ff_vibra",
+ .owner = THIS_MODULE,
+ },
+ .probe = ste_ff_vibra_probe,
+ .remove = __devexit_p(ste_ff_vibra_remove)
+};
+
+static int __init ste_ff_vibra_init(void)
+{
+ return platform_driver_register(&ste_ff_vibra_driver);
+}
+module_init(ste_ff_vibra_init);
+
+static void __exit ste_ff_vibra_exit(void)
+{
+ platform_driver_unregister(&ste_ff_vibra_driver);
+}
+module_exit(ste_ff_vibra_exit);
+
+MODULE_AUTHOR("Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com>");
+MODULE_DESCRIPTION("STE Force Feedback Vibrator Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index f2d03c06c2d..d6ae75865e4 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) ST-Ericsson SA 2009
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
* License terms:GNU General Public License (GPL) version 2
*/
@@ -12,13 +12,14 @@
#include <linux/input.h>
#include <linux/input/bu21013.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#define PEN_DOWN_INTR 0
-#define MAX_FINGERS 2
#define RESET_DELAY 30
-#define PENUP_TIMEOUT (10)
+#define PENUP_TIMEOUT 2 /* 2msecs */
+#define SCALE_FACTOR 1000
#define DELTA_MIN 16
#define MASK_BITS 0x03
#define SHIFT_8 8
@@ -131,7 +132,7 @@
#define BU21013_NUMBER_OF_X_SENSORS (6)
#define BU21013_NUMBER_OF_Y_SENSORS (11)
-#define DRIVER_TP "bu21013_tp"
+#define DRIVER_TP "bu21013_ts"
/**
* struct bu21013_ts_data - touch panel data structure
@@ -142,6 +143,12 @@
* @in_dev: pointer to the input device structure
* @intr_pin: interrupt pin value
* @regulator: pointer to the Regulator used for touch screen
+ * @enable: flag indicating whether the touch screen is enabled
+ * @ext_clk_enable: true if running on ext clk
+ * @ext_clk_state: Saved state for suspend/resume of ext clk
+ * @factor_x: x scale factor
+ * @factor_y: y scale factor
+ * @tpclk: pointer to clock structure
*
* Touch panel device data structure
*/
@@ -149,12 +156,226 @@ struct bu21013_ts_data {
struct i2c_client *client;
wait_queue_head_t wait;
bool touch_stopped;
- const struct bu21013_platform_device *chip;
+ struct bu21013_platform_device *chip;
struct input_dev *in_dev;
unsigned int intr_pin;
struct regulator *regulator;
+ bool enable;
+ bool ext_clk_enable;
+ bool ext_clk_state;
+ unsigned int factor_x;
+ unsigned int factor_y;
+ struct clk *tpclk;
};
+static int bu21013_init_chip(struct bu21013_ts_data *data, bool on_ext_clk);
+
+/**
+ * bu21013_ext_clk() - enable/disable the external clock
+ * @pdata: touch screen data
+ * @enable: enable external clock
+ * @reconfig: reconfigure chip upon external clock off.
+ *
+ * This function is used to enable or disable the external clock and,
+ * optionally, to reconfigure the hardware.
+ */
+static int bu21013_ext_clk(struct bu21013_ts_data *pdata, bool enable,
+ bool reconfig)
+{
+ int retval = 0;
+
+ if (!pdata->tpclk || pdata->ext_clk_enable == enable)
+ return retval;
+
+ if (enable) {
+ pdata->ext_clk_enable = true;
+ clk_enable(pdata->tpclk);
+ retval = bu21013_init_chip(pdata, true);
+ } else {
+ pdata->ext_clk_enable = false;
+ if (reconfig)
+ retval = bu21013_init_chip(pdata, false);
+ clk_disable(pdata->tpclk);
+ }
+ return retval;
+}
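Note the ordering the helper enforces: the external clock is running before the chip is reprogrammed to use it, and on the way back the chip is (optionally) reprogrammed onto the internal clock before the external one is gated. A condensed usage sketch against the signature above (function names here are hypothetical):

/* Sketch: switching the controller between clock sources. */
static int use_ext_clk_sketch(struct bu21013_ts_data *p)
{
	return bu21013_ext_clk(p, true, true);	/* clk on, then re-init */
}

static int use_int_clk_sketch(struct bu21013_ts_data *p)
{
	return bu21013_ext_clk(p, false, true);	/* re-init, then clk off */
}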
+
+/**
+ * bu21013_enable() - enable the touch driver event
+ * @pdata: touch screen data
+ *
+ * This function is used to enable the driver and returns an integer.
+ */
+static int bu21013_enable(struct bu21013_ts_data *pdata)
+{
+ int retval;
+
+ if (pdata->regulator)
+ regulator_enable(pdata->regulator);
+
+ if (pdata->chip->cs_en) {
+ retval = pdata->chip->cs_en(pdata->chip->cs_pin);
+ if (retval < 0) {
+ dev_err(&pdata->client->dev, "enable hw failed\n");
+ return retval;
+ }
+ }
+
+ if (pdata->ext_clk_state)
+ retval = bu21013_ext_clk(pdata, true, true);
+ else
+ retval = bu21013_init_chip(pdata, false);
+
+ if (retval < 0) {
+ dev_err(&pdata->client->dev, "enable hw failed\n");
+ return retval;
+ }
+ pdata->touch_stopped = false;
+ enable_irq(pdata->chip->irq);
+
+ return 0;
+}
+
+/**
+ * bu21013_disable() - disable the touch driver event
+ * @pdata: touch screen data
+ *
+ * This function is used to disable the driver.
+ */
+static void bu21013_disable(struct bu21013_ts_data *pdata)
+{
+ pdata->touch_stopped = true;
+
+ pdata->ext_clk_state = pdata->ext_clk_enable;
+ (void) bu21013_ext_clk(pdata, false, false);
+
+ disable_irq(pdata->chip->irq);
+ if (pdata->chip->cs_dis)
+ pdata->chip->cs_dis(pdata->chip->cs_pin);
+ if (pdata->regulator)
+ regulator_disable(pdata->regulator);
+}
+
+/**
+ * bu21013_show_attr_enable() - show the touch screen controller status
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ *
+ * This function is used to show whether the touch screen is enabled or
+ * disabled.
+ */
+static ssize_t bu21013_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", pdata->enable);
+}
+
+/**
+ * bu21013_store_attr_enable() - Enable/Disable the touchscreen.
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ * @count: number of parameters
+ *
+ * This function is used to enable or disable the touch screen controller.
+ */
+static ssize_t bu21013_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ unsigned long val;
+
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (pdata->enable != val) {
+ pdata->enable = val ? true : false;
+ if (pdata->enable) {
+ ret = bu21013_enable(pdata);
+ if (ret < 0)
+ return ret;
+ } else
+ bu21013_disable(pdata);
+ }
+ return count;
+}
+
+/**
+ * bu21013_show_attr_extclk() - shows the external clock status
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ *
+ * This function is used to show whether the external clock for the touch
+ * screen is enabled or disabled.
+ */
+static ssize_t bu21013_show_attr_extclk(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", pdata->ext_clk_enable);
+}
+
+/**
+ * bu21013_store_attr_extclk() - Enable/Disable the external clock
+ * for the touch screen controller.
+ * @dev: pointer to device structure
+ * @attr: pointer to device attribute
+ * @buf: parameter buffer
+ * @count: number of parameters
+ *
+ * This function is used to enable or disable the external clock for the touch
+ * screen controller.
+ */
+static ssize_t bu21013_store_attr_extclk(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval = 0;
+ struct bu21013_ts_data *pdata = dev_get_drvdata(dev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (pdata->chip->has_ext_clk) {
+ if (pdata->enable)
+ retval = bu21013_ext_clk(pdata, val, true);
+ else
+ pdata->ext_clk_state = val;
+ if (retval < 0)
+ return retval;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ bu21013_show_attr_enable, bu21013_store_attr_enable);
+
+static DEVICE_ATTR(ext_clk, S_IWUSR | S_IRUGO,
+ bu21013_show_attr_extclk, bu21013_store_attr_extclk);
+
+static struct attribute *bu21013_attribute[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_ext_clk.attr,
+ NULL,
+};
+
+static struct attribute_group bu21013_attr_group = {
+ .attrs = bu21013_attribute,
+};
+
/**
* bu21013_read_block_data(): read the touch co-ordinates
* @data: bu21013_ts_data structure pointer
@@ -204,12 +425,14 @@ static int bu21013_do_touch_report(struct bu21013_ts_data *data)
if (!has_x_sensors || !has_y_sensors)
return 0;
- for (i = 0; i < MAX_FINGERS; i++) {
+ for (i = 0; i < 2; i++) {
const u8 *p = &buf[4 * i + 3];
unsigned int x = p[0] << SHIFT_2 | (p[1] & MASK_BITS);
unsigned int y = p[2] << SHIFT_2 | (p[3] & MASK_BITS);
if (x == 0 || y == 0)
continue;
+ x = x * data->factor_x / SCALE_FACTOR;
+ y = y * data->factor_y / SCALE_FACTOR;
pos_x[finger_down_count] = x;
pos_y[finger_down_count] = y;
finger_down_count++;
@@ -217,21 +440,21 @@ static int bu21013_do_touch_report(struct bu21013_ts_data *data)
if (finger_down_count) {
if (finger_down_count == 2 &&
- (abs(pos_x[0] - pos_x[1]) < DELTA_MIN ||
- abs(pos_y[0] - pos_y[1]) < DELTA_MIN)) {
+ (abs(pos_x[0] - pos_x[1]) < DELTA_MIN ||
+ abs(pos_y[0] - pos_y[1]) < DELTA_MIN))
return 0;
- }
for (i = 0; i < finger_down_count; i++) {
- if (data->chip->x_flip)
- pos_x[i] = data->chip->touch_x_max - pos_x[i];
- if (data->chip->y_flip)
- pos_y[i] = data->chip->touch_y_max - pos_y[i];
-
- input_report_abs(data->in_dev,
- ABS_MT_POSITION_X, pos_x[i]);
- input_report_abs(data->in_dev,
- ABS_MT_POSITION_Y, pos_y[i]);
+ if (data->chip->portrait && data->chip->x_flip)
+ pos_x[i] = data->chip->x_max_res - pos_x[i];
+ if (data->chip->portrait && data->chip->y_flip)
+ pos_y[i] = data->chip->y_max_res - pos_y[i];
+ input_report_abs(data->in_dev, ABS_MT_TOUCH_MAJOR,
+ max(pos_x[i], pos_y[i]));
+ input_report_abs(data->in_dev, ABS_MT_POSITION_X,
+ pos_x[i]);
+ input_report_abs(data->in_dev, ABS_MT_POSITION_Y,
+ pos_y[i]);
input_mt_sync(data->in_dev);
}
} else
@@ -261,24 +484,23 @@ static irqreturn_t bu21013_gpio_irq(int irq, void *device_data)
dev_err(&i2c->dev, "bu21013_do_touch_report failed\n");
return IRQ_NONE;
}
-
data->intr_pin = data->chip->irq_read_val();
if (data->intr_pin == PEN_DOWN_INTR)
wait_event_timeout(data->wait, data->touch_stopped,
- msecs_to_jiffies(2));
+ msecs_to_jiffies(PENUP_TIMEOUT));
} while (!data->intr_pin && !data->touch_stopped);
-
return IRQ_HANDLED;
}
/**
* bu21013_init_chip() - power on sequence for the bu21013 controller
* @data: device structure pointer
+ * @on_ext_clk: Run on external clock
*
* This function is used to power on
* the bu21013 controller and returns integer.
*/
-static int bu21013_init_chip(struct bu21013_ts_data *data)
+static int bu21013_init_chip(struct bu21013_ts_data *data, bool on_ext_clk)
{
int retval;
struct i2c_client *i2c = data->client;
@@ -297,28 +519,24 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_SENSOR_0_7 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_8_15_REG,
BU21013_SENSORS_EN_8_15);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_SENSOR_8_15 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_16_23_REG,
BU21013_SENSORS_EN_16_23);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_SENSOR_16_23 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE1_REG,
(BU21013_POS_MODE1_0 | BU21013_POS_MODE1_1));
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_POS_MODE1 reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE2_REG,
(BU21013_POS_MODE2_ZERO | BU21013_POS_MODE2_AVG1 |
BU21013_POS_MODE2_AVG2 | BU21013_POS_MODE2_EN_RAW |
@@ -327,8 +545,7 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_POS_MODE2 reg write failed\n");
return retval;
}
-
- if (data->chip->ext_clk)
+ if (on_ext_clk)
retval = i2c_smbus_write_byte_data(i2c, BU21013_CLK_MODE_REG,
(BU21013_CLK_MODE_EXT | BU21013_CLK_MODE_CALIB));
else
@@ -338,21 +555,18 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_CLK_MODE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_IDLE_REG,
(BU21013_IDLET_0 | BU21013_IDLE_INTERMIT_EN));
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_IDLE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_INT_MODE_REG,
BU21013_INT_MODE_LEVEL);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_INT_MODE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_FILTER_REG,
(BU21013_DELTA_0_6 |
BU21013_FILTER_EN));
@@ -367,14 +581,12 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_TH_ON reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_TH_OFF_REG,
BU21013_TH_OFF_4 | BU21013_TH_OFF_3);
if (retval < 0) {
dev_err(&i2c->dev, "BU21013_TH_OFF reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_GAIN_REG,
(BU21013_GAIN_0 | BU21013_GAIN_1));
if (retval < 0) {
@@ -388,7 +600,6 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_OFFSET_MODE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_XY_EDGE_REG,
(BU21013_X_EDGE_0 | BU21013_X_EDGE_2 |
BU21013_Y_EDGE_1 | BU21013_Y_EDGE_3));
@@ -396,7 +607,6 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
dev_err(&i2c->dev, "BU21013_XY_EDGE reg write failed\n");
return retval;
}
-
retval = i2c_smbus_write_byte_data(i2c, BU21013_DONE_REG,
BU21013_DONE);
if (retval < 0) {
@@ -404,25 +614,15 @@ static int bu21013_init_chip(struct bu21013_ts_data *data)
return retval;
}
- return 0;
+ data->factor_x = (data->chip->x_max_res * SCALE_FACTOR /
+ data->chip->touch_x_max);
+ data->factor_y = (data->chip->y_max_res * SCALE_FACTOR /
+ data->chip->touch_y_max);
+ return retval;
}
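The factors computed above are fixed-point ratios with SCALE_FACTOR (1000) as the implied denominator, letting the report path rescale raw coordinates into the display resolution with integer math only. A runnable sketch with hypothetical panel numbers:

#include <stdio.h>

#define SCALE_FACTOR 1000

int main(void)
{
	/* Hypothetical panel: 1024 raw steps mapped onto 480 pixels. */
	unsigned int touch_x_max = 1024, x_max_res = 480;
	unsigned int factor_x = x_max_res * SCALE_FACTOR / touch_x_max;
	unsigned int raw = 512;

	/* factor_x == 468; 512 * 468 / 1000 == 239, i.e. ~mid-screen */
	printf("raw %u -> %u px\n", raw, raw * factor_x / SCALE_FACTOR);
	return 0;
}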
/**
- * bu21013_free_irq() - frees IRQ registered for touchscreen
- * @bu21013_data: device structure pointer
- *
- * This function signals interrupt thread to stop processing and
- * frees interrupt.
- */
-static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
-{
- bu21013_data->touch_stopped = true;
- wake_up(&bu21013_data->wait);
- free_irq(bu21013_data->chip->irq, bu21013_data);
-}
-
-/**
- * bu21013_probe() - initializes the i2c-client touchscreen driver
+ * bu21013_probe() - initializes the i2c-client touchscreen driver
* @client: i2c client structure pointer
* @id: i2c device id pointer
*
@@ -432,11 +632,11 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
static int __devinit bu21013_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ int retval;
struct bu21013_ts_data *bu21013_data;
struct input_dev *in_dev;
- const struct bu21013_platform_device *pdata =
+ struct bu21013_platform_device *pdata =
client->dev.platform_data;
- int error;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -446,53 +646,72 @@ static int __devinit bu21013_probe(struct i2c_client *client,
if (!pdata) {
dev_err(&client->dev, "platform data not defined\n");
- return -EINVAL;
+ retval = -EINVAL;
+ return retval;
}
bu21013_data = kzalloc(sizeof(struct bu21013_ts_data), GFP_KERNEL);
- in_dev = input_allocate_device();
- if (!bu21013_data || !in_dev) {
+ if (!bu21013_data) {
dev_err(&client->dev, "device memory alloc failed\n");
- error = -ENOMEM;
- goto err_free_mem;
+ retval = -ENOMEM;
+ return retval;
+ }
+ /* allocate input device */
+ in_dev = input_allocate_device();
+ if (!in_dev) {
+ dev_err(&client->dev, "input device memory alloc failed\n");
+ retval = -ENOMEM;
+ goto err_alloc;
}
bu21013_data->in_dev = in_dev;
bu21013_data->chip = pdata;
bu21013_data->client = client;
- bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
+ bu21013_data->regulator = regulator_get(&client->dev, "avdd");
if (IS_ERR(bu21013_data->regulator)) {
- dev_err(&client->dev, "regulator_get failed\n");
- error = PTR_ERR(bu21013_data->regulator);
- goto err_free_mem;
+ dev_warn(&client->dev, "regulator_get failed\n");
+ bu21013_data->regulator = NULL;
}
-
- error = regulator_enable(bu21013_data->regulator);
- if (error < 0) {
- dev_err(&client->dev, "regulator enable failed\n");
- goto err_put_regulator;
- }
-
- bu21013_data->touch_stopped = false;
- init_waitqueue_head(&bu21013_data->wait);
+ if (bu21013_data->regulator)
+ regulator_enable(bu21013_data->regulator);
/* configure the gpio pins */
if (pdata->cs_en) {
- error = pdata->cs_en(pdata->cs_pin);
- if (error < 0) {
+ retval = pdata->cs_en(pdata->cs_pin);
+ if (retval < 0) {
dev_err(&client->dev, "chip init failed\n");
- goto err_disable_regulator;
+ goto err_init_cs;
+ }
+ }
+
+ if (pdata->has_ext_clk) {
+ bu21013_data->tpclk = clk_get(&client->dev, NULL);
+ if (IS_ERR(bu21013_data->tpclk)) {
+ dev_warn(&client->dev, "get extern clock failed\n");
+ bu21013_data->tpclk = NULL;
+ }
+ }
+
+ if (pdata->enable_ext_clk && bu21013_data->tpclk) {
+ retval = clk_enable(bu21013_data->tpclk);
+ if (retval < 0) {
+ dev_err(&client->dev, "clock enable failed\n");
+ goto err_ext_clk;
}
+ bu21013_data->ext_clk_enable = true;
}
/* configure the touch panel controller */
- error = bu21013_init_chip(bu21013_data);
- if (error) {
+ retval = bu21013_init_chip(bu21013_data, bu21013_data->ext_clk_enable);
+ if (retval < 0) {
dev_err(&client->dev, "error in bu21013 config\n");
- goto err_cs_disable;
+ goto err_init_config;
}
+ init_waitqueue_head(&bu21013_data->wait);
+ bu21013_data->touch_stopped = false;
+
/* register the device to input subsystem */
in_dev->name = DRIVER_TP;
in_dev->id.bustype = BUS_I2C;
@@ -503,44 +722,63 @@ static int __devinit bu21013_probe(struct i2c_client *client,
__set_bit(EV_ABS, in_dev->evbit);
input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0,
- pdata->touch_x_max, 0, 0);
+ pdata->x_max_res, 0, 0);
input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0,
- pdata->touch_y_max, 0, 0);
+ pdata->y_max_res, 0, 0);
+ input_set_abs_params(in_dev, ABS_MT_TOUCH_MAJOR, 0,
+ max(pdata->x_max_res, pdata->y_max_res), 0, 0);
input_set_drvdata(in_dev, bu21013_data);
-
- error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
- IRQF_TRIGGER_FALLING | IRQF_SHARED,
- DRIVER_TP, bu21013_data);
- if (error) {
+ retval = input_register_device(in_dev);
+ if (retval)
+ goto err_input_register;
+
+ retval = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
+ (IRQF_TRIGGER_FALLING | IRQF_SHARED),
+ DRIVER_TP, bu21013_data);
+ if (retval) {
dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
- goto err_cs_disable;
+ goto err_init_irq;
}
+ bu21013_data->enable = true;
+ i2c_set_clientdata(client, bu21013_data);
- error = input_register_device(in_dev);
- if (error) {
- dev_err(&client->dev, "failed to register input device\n");
- goto err_free_irq;
+ /* sysfs interface for dynamically enabling/disabling input events */
+ retval = sysfs_create_group(&client->dev.kobj, &bu21013_attr_group);
+ if (retval) {
+ dev_err(&client->dev, "failed to create sysfs entries\n");
+ goto err_sysfs_create;
}
- device_init_wakeup(&client->dev, pdata->wakeup);
- i2c_set_clientdata(client, bu21013_data);
-
- return 0;
+ return retval;
-err_free_irq:
- bu21013_free_irq(bu21013_data);
-err_cs_disable:
- pdata->cs_dis(pdata->cs_pin);
-err_disable_regulator:
- regulator_disable(bu21013_data->regulator);
-err_put_regulator:
- regulator_put(bu21013_data->regulator);
-err_free_mem:
- input_free_device(in_dev);
+err_sysfs_create:
+ free_irq(pdata->irq, bu21013_data);
+ i2c_set_clientdata(client, NULL);
+err_init_irq:
+ input_unregister_device(bu21013_data->in_dev);
+err_input_register:
+ wake_up(&bu21013_data->wait);
+err_init_config:
+ if (bu21013_data->tpclk) {
+ if (bu21013_data->ext_clk_enable)
+ clk_disable(bu21013_data->tpclk);
+ clk_put(bu21013_data->tpclk);
+ }
+err_ext_clk:
+ if (pdata->cs_dis)
+ pdata->cs_dis(pdata->cs_pin);
+err_init_cs:
+ if (bu21013_data->regulator) {
+ regulator_disable(bu21013_data->regulator);
+ regulator_put(bu21013_data->regulator);
+ }
+ input_free_device(bu21013_data->in_dev);
+err_alloc:
kfree(bu21013_data);
- return error;
+ return retval;
}
+
/**
* bu21013_remove() - removes the i2c-client touchscreen driver
* @client: i2c client structure pointer
@@ -552,19 +790,24 @@ static int __devexit bu21013_remove(struct i2c_client *client)
{
struct bu21013_ts_data *bu21013_data = i2c_get_clientdata(client);
- bu21013_free_irq(bu21013_data);
-
+ bu21013_data->touch_stopped = true;
+ sysfs_remove_group(&client->dev.kobj, &bu21013_attr_group);
+ wake_up(&bu21013_data->wait);
+ free_irq(bu21013_data->chip->irq, bu21013_data);
bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
-
input_unregister_device(bu21013_data->in_dev);
- regulator_disable(bu21013_data->regulator);
- regulator_put(bu21013_data->regulator);
-
+ if (bu21013_data->tpclk) {
+ if (bu21013_data->ext_clk_enable)
+ clk_disable(bu21013_data->tpclk);
+ clk_put(bu21013_data->tpclk);
+ }
+ if (bu21013_data->regulator) {
+ regulator_disable(bu21013_data->regulator);
+ regulator_put(bu21013_data->regulator);
+ }
kfree(bu21013_data);
- device_init_wakeup(&client->dev, false);
-
return 0;
}
@@ -579,15 +822,8 @@ static int __devexit bu21013_remove(struct i2c_client *client)
static int bu21013_suspend(struct device *dev)
{
struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev);
- struct i2c_client *client = bu21013_data->client;
- bu21013_data->touch_stopped = true;
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(bu21013_data->chip->irq);
- else
- disable_irq(bu21013_data->chip->irq);
-
- regulator_disable(bu21013_data->regulator);
+ bu21013_disable(bu21013_data);
return 0;
}
@@ -602,29 +838,8 @@ static int bu21013_suspend(struct device *dev)
static int bu21013_resume(struct device *dev)
{
struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev);
- struct i2c_client *client = bu21013_data->client;
- int retval;
- retval = regulator_enable(bu21013_data->regulator);
- if (retval < 0) {
- dev_err(&client->dev, "bu21013 regulator enable failed\n");
- return retval;
- }
-
- retval = bu21013_init_chip(bu21013_data);
- if (retval < 0) {
- dev_err(&client->dev, "bu21013 controller config failed\n");
- return retval;
- }
-
- bu21013_data->touch_stopped = false;
-
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(bu21013_data->chip->irq);
- else
- enable_irq(bu21013_data->chip->irq);
-
- return 0;
+ return bu21013_enable(bu21013_data);
}
static const struct dev_pm_ops bu21013_dev_pm_ops = {
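The reworked probe above releases resources in the reverse order of acquisition through its ladder of goto labels, so each label undoes only what has already succeeded. A minimal sketch of that pattern, with hypothetical helpers standing in for the driver's actual calls:

	/* hypothetical acquire/release pairs, for illustration only */
	static int acquire_a(void) { return 0; }
	static int acquire_b(void) { return 0; }
	static void release_a(void) { }

	static int example_probe(void)
	{
		int retval;

		retval = acquire_a();
		if (retval < 0)
			return retval;

		retval = acquire_b();
		if (retval < 0)
			goto err_a;

		return 0;

	err_a:
		release_a();	/* undo only the steps that succeeded */
		return retval;
	}
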
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi.c b/drivers/input/touchscreen/synaptics_i2c_rmi.c
new file mode 100644
index 00000000000..5729602cbb6
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi.c
@@ -0,0 +1,675 @@
+/* drivers/input/touchscreen/synaptics_i2c_rmi.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/hrtimer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/synaptics_i2c_rmi.h>
+
+static struct workqueue_struct *synaptics_wq;
+
+struct synaptics_ts_data {
+ uint16_t addr;
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ int use_irq;
+ bool has_relative_report;
+ struct hrtimer timer;
+ struct work_struct work;
+ uint16_t max[2];
+ int snap_state[2][2];
+ int snap_down_on[2];
+ int snap_down_off[2];
+ int snap_up_on[2];
+ int snap_up_off[2];
+ int snap_down[2];
+ int snap_up[2];
+ uint32_t flags;
+ int reported_finger_count;
+ int8_t sensitivity_adjust;
+ int (*power)(int on);
+ struct early_suspend early_suspend;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h);
+static void synaptics_ts_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_init_panel(struct synaptics_ts_data *ts)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_page_select_failed;
+ }
+ ret = i2c_smbus_write_byte_data(ts->client, 0x41, 0x04); /* Set "No Clip Z" */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for No Clip Z\n");
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0x44,
+ ts->sensitivity_adjust);
+ if (ret < 0)
+ pr_err("synaptics_ts: failed to set Sensitivity Adjust\n");
+
+err_page_select_failed:
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x04); /* page select = 0x04 */
+ if (ret < 0)
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf0, 0x81); /* normal operation, 80 reports per second */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume: i2c_smbus_write_byte_data failed\n");
+ return ret;
+}
+
+static void synaptics_ts_work_func(struct work_struct *work)
+{
+ int i;
+ int ret;
+ int bad_data = 0;
+ struct i2c_msg msg[2];
+ uint8_t start_reg;
+ uint8_t buf[15];
+ struct synaptics_ts_data *ts = container_of(work, struct synaptics_ts_data, work);
+ int buf_len = ts->has_relative_report ? 15 : 13;
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &start_reg;
+ start_reg = 0x00;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = buf_len;
+ msg[1].buf = buf;
+
+ /* printk("synaptics_ts_work_func\n"); */
+ for (i = 0; i < ((ts->use_irq && !bad_data) ? 1 : 10); i++) {
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_work_func: i2c_transfer failed\n");
+ bad_data = 1;
+ } else {
+ /* printk("synaptics_ts_work_func: %x %x %x %x %x %x" */
+ /* " %x %x %x %x %x %x %x %x %x, ret %d\n", */
+ /* buf[0], buf[1], buf[2], buf[3], */
+ /* buf[4], buf[5], buf[6], buf[7], */
+ /* buf[8], buf[9], buf[10], buf[11], */
+ /* buf[12], buf[13], buf[14], ret); */
+ if ((buf[buf_len - 1] & 0xc0) != 0x40) {
+ printk(KERN_WARNING "synaptics_ts_work_func:"
+ " bad read %x %x %x %x %x %x %x %x %x"
+ " %x %x %x %x %x %x, ret %d\n",
+ buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11],
+ buf[12], buf[13], buf[14], ret);
+ if (bad_data)
+ synaptics_init_panel(ts);
+ bad_data = 1;
+ continue;
+ }
+ bad_data = 0;
+ if ((buf[buf_len - 1] & 1) == 0) {
+ /* printk("read %d coordinates\n", i); */
+ break;
+ } else {
+ int pos[2][2];
+ int f, a;
+ int base;
+ /* int x = buf[3] | (uint16_t)(buf[2] & 0x1f) << 8; */
+ /* int y = buf[5] | (uint16_t)(buf[4] & 0x1f) << 8; */
+ int z = buf[1];
+ int w = buf[0] >> 4;
+ int finger = buf[0] & 7;
+
+ /* int x2 = buf[3+6] | (uint16_t)(buf[2+6] & 0x1f) << 8; */
+ /* int y2 = buf[5+6] | (uint16_t)(buf[4+6] & 0x1f) << 8; */
+ /* int z2 = buf[1+6]; */
+ /* int w2 = buf[0+6] >> 4; */
+ /* int finger2 = buf[0+6] & 7; */
+
+ /* int dx = (int8_t)buf[12]; */
+ /* int dy = (int8_t)buf[13]; */
+ int finger2_pressed;
+
+ /* printk("x %4d, y %4d, z %3d, w %2d, F %d, 2nd: x %4d, y %4d, z %3d, w %2d, F %d, dx %4d, dy %4d\n", */
+ /* x, y, z, w, finger, */
+ /* x2, y2, z2, w2, finger2, */
+ /* dx, dy); */
+
+ base = 2;
+ for (f = 0; f < 2; f++) {
+ uint32_t flip_flag = SYNAPTICS_FLIP_X;
+ for (a = 0; a < 2; a++) {
+ int p = buf[base + 1];
+ p |= (uint16_t)(buf[base] & 0x1f) << 8;
+ if (ts->flags & flip_flag)
+ p = ts->max[a] - p;
+ if (ts->flags & SYNAPTICS_SNAP_TO_INACTIVE_EDGE) {
+ if (ts->snap_state[f][a]) {
+ if (p <= ts->snap_down_off[a])
+ p = ts->snap_down[a];
+ else if (p >= ts->snap_up_off[a])
+ p = ts->snap_up[a];
+ else
+ ts->snap_state[f][a] = 0;
+ } else {
+ if (p <= ts->snap_down_on[a]) {
+ p = ts->snap_down[a];
+ ts->snap_state[f][a] = 1;
+ } else if (p >= ts->snap_up_on[a]) {
+ p = ts->snap_up[a];
+ ts->snap_state[f][a] = 1;
+ }
+ }
+ }
+ pos[f][a] = p;
+ base += 2;
+ flip_flag <<= 1;
+ }
+ base += 2;
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(pos[f][0], pos[f][1]);
+ }
+ if (z) {
+ input_report_abs(ts->input_dev, ABS_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_Y, pos[0][1]);
+ }
+ input_report_abs(ts->input_dev, ABS_PRESSURE, z);
+ input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, w);
+ input_report_key(ts->input_dev, BTN_TOUCH, finger);
+ finger2_pressed = finger > 1 && finger != 7;
+ input_report_key(ts->input_dev, BTN_2, finger2_pressed);
+ if (finger2_pressed) {
+ input_report_abs(ts->input_dev, ABS_HAT0X, pos[1][0]);
+ input_report_abs(ts->input_dev, ABS_HAT0Y, pos[1][1]);
+ }
+
+ if (!finger)
+ z = 0;
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[0][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[0][1]);
+ input_mt_sync(ts->input_dev);
+ if (finger2_pressed) {
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[1][0]);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[1][1]);
+ input_mt_sync(ts->input_dev);
+ } else if (ts->reported_finger_count > 1) {
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
+ input_mt_sync(ts->input_dev);
+ }
+ ts->reported_finger_count = finger;
+ input_sync(ts->input_dev);
+ }
+ }
+ }
+ if (ts->use_irq)
+ enable_irq(ts->client->irq);
+}
+
+static enum hrtimer_restart synaptics_ts_timer_func(struct hrtimer *timer)
+{
+ struct synaptics_ts_data *ts = container_of(timer, struct synaptics_ts_data, timer);
+ /* printk("synaptics_ts_timer_func\n"); */
+
+ queue_work(synaptics_wq, &ts->work);
+
+ hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t synaptics_ts_irq_handler(int irq, void *dev_id)
+{
+ struct synaptics_ts_data *ts = dev_id;
+
+ /* printk("synaptics_ts_irq_handler\n"); */
+ disable_irq_nosync(ts->client->irq);
+ queue_work(synaptics_wq, &ts->work);
+ return IRQ_HANDLED;
+}
+
+static int synaptics_ts_probe(
+ struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct synaptics_ts_data *ts;
+ uint8_t buf0[4];
+ uint8_t buf1[8];
+ struct i2c_msg msg[2];
+ int ret = 0;
+ uint16_t max_x, max_y;
+ int fuzz_x, fuzz_y, fuzz_p, fuzz_w;
+ struct synaptics_i2c_rmi_platform_data *pdata;
+ unsigned long irqflags;
+ int inactive_area_left;
+ int inactive_area_right;
+ int inactive_area_top;
+ int inactive_area_bottom;
+ int snap_left_on;
+ int snap_left_off;
+ int snap_right_on;
+ int snap_right_off;
+ int snap_top_on;
+ int snap_top_off;
+ int snap_bottom_on;
+ int snap_bottom_off;
+ uint32_t panel_version;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ printk(KERN_ERR "synaptics_ts_probe: need I2C_FUNC_I2C\n");
+ ret = -ENODEV;
+ goto err_check_functionality_failed;
+ }
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (ts == NULL) {
+ ret = -ENOMEM;
+ goto err_alloc_data_failed;
+ }
+ INIT_WORK(&ts->work, synaptics_ts_work_func);
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+ pdata = client->dev.platform_data;
+ if (pdata)
+ ts->power = pdata->power;
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_ts_probe power on failed\n");
+ goto err_power_failed;
+ }
+ }
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf4, 0x01); /* device command = reset */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ /* fail? */
+ }
+ {
+ int retry = 10;
+ while (retry-- > 0) {
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe4);
+ if (ret >= 0)
+ break;
+ msleep(100);
+ }
+ }
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Major Version %x\n", ret);
+ panel_version = ret << 8;
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe5);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: Product Minor Version %x\n", ret);
+ panel_version |= ret;
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xe3);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: product property %x\n", ret);
+
+ if (pdata) {
+ while (pdata->version > panel_version)
+ pdata++;
+ ts->flags = pdata->flags;
+ ts->sensitivity_adjust = pdata->sensitivity_adjust;
+ irqflags = pdata->irqflags;
+ inactive_area_left = pdata->inactive_left;
+ inactive_area_right = pdata->inactive_right;
+ inactive_area_top = pdata->inactive_top;
+ inactive_area_bottom = pdata->inactive_bottom;
+ snap_left_on = pdata->snap_left_on;
+ snap_left_off = pdata->snap_left_off;
+ snap_right_on = pdata->snap_right_on;
+ snap_right_off = pdata->snap_right_off;
+ snap_top_on = pdata->snap_top_on;
+ snap_top_off = pdata->snap_top_off;
+ snap_bottom_on = pdata->snap_bottom_on;
+ snap_bottom_off = pdata->snap_bottom_off;
+ fuzz_x = pdata->fuzz_x;
+ fuzz_y = pdata->fuzz_y;
+ fuzz_p = pdata->fuzz_p;
+ fuzz_w = pdata->fuzz_w;
+ } else {
+ irqflags = 0;
+ inactive_area_left = 0;
+ inactive_area_right = 0;
+ inactive_area_top = 0;
+ inactive_area_bottom = 0;
+ snap_left_on = 0;
+ snap_left_off = 0;
+ snap_right_on = 0;
+ snap_right_off = 0;
+ snap_top_on = 0;
+ snap_top_off = 0;
+ snap_bottom_on = 0;
+ snap_bottom_off = 0;
+ fuzz_x = 0;
+ fuzz_y = 0;
+ fuzz_p = 0;
+ fuzz_w = 0;
+ }
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf0);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: device control %x\n", ret);
+
+ ret = i2c_smbus_read_byte_data(ts->client, 0xf1);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_byte_data failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: interrupt enable %x\n", ret);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed\n");
+ goto err_detect_failed;
+ }
+
+ msg[0].addr = ts->client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = buf0;
+ buf0[0] = 0xe0;
+ msg[1].addr = ts->client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 8;
+ msg[1].buf = buf1;
+ ret = i2c_transfer(ts->client->adapter, msg, 2);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_transfer failed\n");
+ goto err_detect_failed;
+ }
+ printk(KERN_INFO "synaptics_ts_probe: 0xe0: %x %x %x %x %x %x %x %x\n",
+ buf1[0], buf1[1], buf1[2], buf1[3],
+ buf1[4], buf1[5], buf1[6], buf1[7]);
+
+ ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n");
+ goto err_detect_failed;
+ }
+ ret = i2c_smbus_read_word_data(ts->client, 0x02);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->has_relative_report = !(ret & 0x100);
+ printk(KERN_INFO "synaptics_ts_probe: Sensor properties %x\n", ret);
+ ret = i2c_smbus_read_word_data(ts->client, 0x04);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[0] = max_x = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ ret = i2c_smbus_read_word_data(ts->client, 0x06);
+ if (ret < 0) {
+ printk(KERN_ERR "i2c_smbus_read_word_data failed\n");
+ goto err_detect_failed;
+ }
+ ts->max[1] = max_y = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8);
+ if (ts->flags & SYNAPTICS_SWAP_XY)
+ swap(max_x, max_y);
+
+ ret = synaptics_init_panel(ts); /* will also switch back to page 0x04 */
+ if (ret < 0) {
+ printk(KERN_ERR "synaptics_init_panel failed\n");
+ goto err_detect_failed;
+ }
+
+ ts->input_dev = input_allocate_device();
+ if (ts->input_dev == NULL) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "synaptics_ts_probe: Failed to allocate input device\n");
+ goto err_input_dev_alloc_failed;
+ }
+ ts->input_dev->name = "synaptics-rmi-touchscreen";
+ set_bit(EV_SYN, ts->input_dev->evbit);
+ set_bit(EV_KEY, ts->input_dev->evbit);
+ set_bit(BTN_TOUCH, ts->input_dev->keybit);
+ set_bit(BTN_2, ts->input_dev->keybit);
+ set_bit(EV_ABS, ts->input_dev->evbit);
+ inactive_area_left = inactive_area_left * max_x / 0x10000;
+ inactive_area_right = inactive_area_right * max_x / 0x10000;
+ inactive_area_top = inactive_area_top * max_y / 0x10000;
+ inactive_area_bottom = inactive_area_bottom * max_y / 0x10000;
+ snap_left_on = snap_left_on * max_x / 0x10000;
+ snap_left_off = snap_left_off * max_x / 0x10000;
+ snap_right_on = snap_right_on * max_x / 0x10000;
+ snap_right_off = snap_right_off * max_x / 0x10000;
+ snap_top_on = snap_top_on * max_y / 0x10000;
+ snap_top_off = snap_top_off * max_y / 0x10000;
+ snap_bottom_on = snap_bottom_on * max_y / 0x10000;
+ snap_bottom_off = snap_bottom_off * max_y / 0x10000;
+ fuzz_x = fuzz_x * max_x / 0x10000;
+ fuzz_y = fuzz_y * max_y / 0x10000;
+ ts->snap_down[!!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_left;
+ ts->snap_up[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x + inactive_area_right;
+ ts->snap_down[!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_top;
+ ts->snap_up[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y + inactive_area_bottom;
+ ts->snap_down_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_on;
+ ts->snap_down_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_off;
+ ts->snap_up_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_on;
+ ts->snap_up_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_off;
+ ts->snap_down_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_on;
+ ts->snap_down_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_off;
+ ts->snap_up_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_on;
+ ts->snap_up_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_off;
+ printk(KERN_INFO "synaptics_ts_probe: max_x %d, max_y %d\n", max_x, max_y);
+ printk(KERN_INFO "synaptics_ts_probe: inactive_x %d %d, inactive_y %d %d\n",
+ inactive_area_left, inactive_area_right,
+ inactive_area_top, inactive_area_bottom);
+ printk(KERN_INFO "synaptics_ts_probe: snap_x %d-%d %d-%d, snap_y %d-%d %d-%d\n",
+ snap_left_on, snap_left_off, snap_right_on, snap_right_off,
+ snap_top_on, snap_top_off, snap_bottom_on, snap_bottom_off);
+ input_set_abs_params(ts->input_dev, ABS_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH, 0, 15, fuzz_w, 0);
+ input_set_abs_params(ts->input_dev, ABS_HAT0X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_HAT0Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, fuzz_p, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 15, fuzz_w, 0);
+ /* ts->input_dev->name = ts->keypad_info->name; */
+ ret = input_register_device(ts->input_dev);
+ if (ret) {
+ printk(KERN_ERR "synaptics_ts_probe: Unable to register %s input device\n", ts->input_dev->name);
+ goto err_input_register_device_failed;
+ }
+ if (client->irq) {
+ ret = request_irq(client->irq, synaptics_ts_irq_handler, irqflags, client->name, ts);
+ if (ret == 0) {
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+ if (ret)
+ free_irq(client->irq, ts);
+ }
+ if (ret == 0)
+ ts->use_irq = 1;
+ else
+ dev_err(&client->dev, "request_irq failed\n");
+ }
+ if (!ts->use_irq) {
+ hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ts->timer.function = synaptics_ts_timer_func;
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ts->early_suspend.suspend = synaptics_ts_early_suspend;
+ ts->early_suspend.resume = synaptics_ts_late_resume;
+ register_early_suspend(&ts->early_suspend);
+#endif
+
+ printk(KERN_INFO "synaptics_ts_probe: Start touchscreen %s in %s mode\n", ts->input_dev->name, ts->use_irq ? "interrupt" : "polling");
+
+ return 0;
+
+err_input_register_device_failed:
+ input_free_device(ts->input_dev);
+
+err_input_dev_alloc_failed:
+err_detect_failed:
+err_power_failed:
+ kfree(ts);
+err_alloc_data_failed:
+err_check_functionality_failed:
+ return ret;
+}
+
+static int synaptics_ts_remove(struct i2c_client *client)
+{
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+ unregister_early_suspend(&ts->early_suspend);
+ if (ts->use_irq)
+ free_irq(client->irq, ts);
+ else
+ hrtimer_cancel(&ts->timer);
+ input_unregister_device(ts->input_dev);
+ kfree(ts);
+ return 0;
+}
+
+static int synaptics_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->use_irq)
+ disable_irq(client->irq);
+ else
+ hrtimer_cancel(&ts->timer);
+ ret = cancel_work_sync(&ts->work);
+ if (ret && ts->use_irq) /* if work was pending disable-count is now 2 */
+ enable_irq(client->irq);
+ ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+
+ ret = i2c_smbus_write_byte_data(client, 0xf0, 0x86); /* deep sleep */
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n");
+ if (ts->power) {
+ ret = ts->power(0);
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume power off failed\n");
+ }
+ return 0;
+}
+
+static int synaptics_ts_resume(struct i2c_client *client)
+{
+ int ret;
+ struct synaptics_ts_data *ts = i2c_get_clientdata(client);
+
+ if (ts->power) {
+ ret = ts->power(1);
+ if (ret < 0)
+ printk(KERN_ERR "synaptics_ts_resume power on failed\n");
+ }
+
+ synaptics_init_panel(ts);
+
+ if (ts->use_irq)
+ enable_irq(client->irq);
+
+ if (!ts->use_irq)
+ hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ else
+ i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */
+
+ return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void synaptics_ts_early_suspend(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_suspend(ts->client, PMSG_SUSPEND);
+}
+
+static void synaptics_ts_late_resume(struct early_suspend *h)
+{
+ struct synaptics_ts_data *ts;
+ ts = container_of(h, struct synaptics_ts_data, early_suspend);
+ synaptics_ts_resume(ts->client);
+}
+#endif
+
+static const struct i2c_device_id synaptics_ts_id[] = {
+ { SYNAPTICS_I2C_RMI_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver synaptics_ts_driver = {
+ .probe = synaptics_ts_probe,
+ .remove = synaptics_ts_remove,
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ .suspend = synaptics_ts_suspend,
+ .resume = synaptics_ts_resume,
+#endif
+ .id_table = synaptics_ts_id,
+ .driver = {
+ .name = SYNAPTICS_I2C_RMI_NAME,
+ },
+};
+
+static int __devinit synaptics_ts_init(void)
+{
+ synaptics_wq = create_singlethread_workqueue("synaptics_wq");
+ if (!synaptics_wq)
+ return -ENOMEM;
+ return i2c_add_driver(&synaptics_ts_driver);
+}
+
+static void __exit synaptics_ts_exit(void)
+{
+ i2c_del_driver(&synaptics_ts_driver);
+ if (synaptics_wq)
+ destroy_workqueue(synaptics_wq);
+}
+
+module_init(synaptics_ts_init);
+module_exit(synaptics_ts_exit);
+
+MODULE_DESCRIPTION("Synaptics Touchscreen Driver");
+MODULE_LICENSE("GPL");
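
One detail of synaptics_ts_probe() worth spelling out: the inactive-area, snap, and fuzz values taken from platform data are fixed-point fractions of full scale in 1/0x10000 units, converted with val * max / 0x10000. A small sketch with assumed numbers (the panel width and border fraction are illustrative, not from any real board file):

	#include <stdio.h>

	int main(void)
	{
		int max_x = 1472;		/* assumed panel width */
		int inactive_left = 0x1000;	/* assumed: 1/16 of full scale */

		/* same conversion as the probe routine */
		inactive_left = inactive_left * max_x / 0x10000;
		printf("inactive_area_left = %d\n", inactive_left);	/* 92 */
		return 0;
	}
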
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff4b8cfda58..5183a2d4fd5 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -50,6 +50,14 @@ config LEDS_LM3530
controlled manually or using PWM input or using ambient
light automatically.
+config LEDS_AB5500
+ tristate "HVLED driver for AB5500"
+ depends on AB5500_CORE
+ help
+ This option enables support for the HVLED in the AB5500
+ multi-function device. Currently AB5500 v1.0 chip LEDs
+ are supported.
+
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 890481cb09f..59a569b376e 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
+obj-$(CONFIG_LEDS_AB5500) += leds-ab5500.o
obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
diff --git a/drivers/leds/leds-ab5500.c b/drivers/leds/leds-ab5500.c
new file mode 100644
index 00000000000..294551b1962
--- /dev/null
+++ b/drivers/leds/leds-ab5500.c
@@ -0,0 +1,811 @@
+/*
+ * leds-ab5500.c - driver for High Voltage (HV) LED in ST-Ericsson AB5500 chip
+ *
+ * Copyright (C) 2011 ST-Ericsson SA.
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
+ */
+
+/*
+ * Driver for HVLED in ST-Ericsson AB5500 analog baseband controller
+ *
+ * This chip can drive up to 3 LEDs, each with up to 40 mA of LED sink
+ * current. These LEDs can be programmed to blink between two intensities
+ * with a fading delay of half, one, or two seconds.
+ *
+ * The LEDs can be controlled via sysfs entries in
+ * "/sys/class/leds/< red | green | blue >"
+ *
+ * For each led,
+ *
+ * Modes of operation:
+ * - manual: echo 0 > fade_auto (default, no auto blinking)
+ * - auto: echo 1 > fade_auto
+ *
+ * Soft scaling delay between two intensities:
+ * - 1/2 sec: echo 1 > fade_delay
+ * - 1 sec: echo 2 > fade_delay
+ * - 2 sec: echo 3 > fade_delay
+ *
+ * Possible sequences of operation:
+ * - continuous glow: set brightness (brt)
+ * - blink between LED_OFF and LED_FULL:
+ * set fade delay -> set fade auto
+ * - blink between the previous two brightness values (only for LED-1):
+ * set brt1 -> set brt2 -> set fade auto
+ *
+ * The delay can be set at any step; its effect is seen when switching modes.
+ *
+ * Note: the blink/fade feature is supported from AB5500 v2 onwards
+ *
+ */
+
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/leds-ab5500.h>
+#include <linux/types.h>
+
+#include <mach/hardware.h>
+
+#define AB5500LED_NAME "ab5500-leds"
+#define AB5500_LED_MAX 0x03
+
+/* Register offsets */
+#define AB5500_LED_REG_ENABLE 0x03
+#define AB5500_LED_FADE_CTRL 0x0D
+
+/* LED-0 Register Addr. Offsets */
+#define AB5500_LED0_PWM_DUTY 0x01
+#define AB5500_LED0_PWMFREQ 0x02
+#define AB5500_LED0_SINKCTL 0x0A
+#define AB5500_LED0_FADE_HI 0x11
+#define AB5500_LED0_FADE_LO 0x17
+
+/* LED-1 Register Addr. Offsets */
+#define AB5500_LED1_PWM_DUTY 0x05
+#define AB5500_LED1_PWMFREQ 0x06
+#define AB5500_LED1_SINKCTL 0x0B
+#define AB5500_LED1_FADE_HI 0x13
+#define AB5500_LED1_FADE_LO 0x19
+
+/* LED-2 Register Addr. Offsets */
+#define AB5500_LED2_PWM_DUTY 0x08
+#define AB5500_LED2_PWMFREQ 0x09
+#define AB5500_LED2_SINKCTL 0x0C
+#define AB5500_LED2_FADE_HI 0x15
+#define AB5500_LED2_FADE_LO 0x1B
+
+/* led-0/1/2 enable bit */
+#define AB5500_LED_ENABLE_MASK 0x04
+
+/* led intensity */
+#define AB5500_LED_INTENSITY_OFF 0x0
+#define AB5500_LED_INTENSITY_MAX 0x3FF
+#define AB5500_LED_INTENSITY_STEP (AB5500_LED_INTENSITY_MAX/LED_FULL)
+
+/* pwm frequency */
+#define AB5500_LED_PWMFREQ_MAX 0x0F /* 373.39 @sysclk=26MHz */
+#define AB5500_LED_PWMFREQ_SHIFT 4
+
+/* LED sink current control */
+#define AB5500_LED_SINKCURR_MAX 0x0F /* 40mA MAX */
+#define AB5500_LED_SINKCURR_SHIFT 4
+
+/* fade Control shift and masks */
+#define AB5500_FADE_DELAY_SHIFT 0x00
+#define AB5500_FADE_MODE_MASK 0x80
+#define AB5500_FADE_DELAY_MASK 0x03
+#define AB5500_FADE_START_MASK 0x04
+#define AB5500_FADE_ON_MASK 0x70
+#define AB5500_LED_FADE_ENABLE(ledid) (0x40 >> (ledid))
+
+struct ab5500_led {
+ u8 id;
+ u8 max_current;
+ u16 brt_val;
+ u16 fade_hi;
+ u16 fade_lo;
+ bool led_on;
+ struct led_classdev led_cdev;
+ struct work_struct led_work;
+};
+
+struct ab5500_hvleds {
+ struct mutex lock;
+ struct device *dev;
+ struct ab5500_hvleds_platform_data *pdata;
+ struct ab5500_led leds[AB5500_HVLEDS_MAX];
+ bool hw_fade;
+ bool fade_auto;
+ enum ab5500_fade_delay fade_delay;
+};
+
+static u8 ab5500_led_pwmduty_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_PWM_DUTY,
+ AB5500_LED1_PWM_DUTY,
+ AB5500_LED2_PWM_DUTY,
+};
+
+static u8 ab5500_led_pwmfreq_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_PWMFREQ,
+ AB5500_LED1_PWMFREQ,
+ AB5500_LED2_PWMFREQ,
+};
+
+static u8 ab5500_led_sinkctl_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_SINKCTL,
+ AB5500_LED1_SINKCTL,
+ AB5500_LED2_SINKCTL
+};
+
+static u8 ab5500_led_fade_hi_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_FADE_HI,
+ AB5500_LED1_FADE_HI,
+ AB5500_LED2_FADE_HI,
+};
+
+static u8 ab5500_led_fade_lo_reg[AB5500_LED_MAX] = {
+ AB5500_LED0_FADE_LO,
+ AB5500_LED1_FADE_LO,
+ AB5500_LED2_FADE_LO,
+};
+
+#define to_led(_x) container_of(_x, struct ab5500_led, _x)
+
+static inline struct ab5500_hvleds *led_to_hvleds(struct ab5500_led *led)
+{
+ return container_of(led, struct ab5500_hvleds, leds[led->id]);
+}
+
+static int ab5500_led_enable(struct ab5500_hvleds *hvleds,
+ unsigned int led_id)
+{
+ int ret;
+
+ ret = abx500_mask_and_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id],
+ AB5500_LED_ENABLE_MASK,
+ AB5500_LED_ENABLE_MASK);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmduty_reg[led_id], ret);
+
+ return ret;
+
+}
+
+static int ab5500_led_start_manual(struct ab5500_hvleds *hvleds)
+{
+ int ret;
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_mask_and_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_FADE_CTRL, AB5500_FADE_START_MASK,
+ AB5500_FADE_START_MASK);
+ if (ret < 0)
+ dev_err(hvleds->dev, "update reg 0x%x failed - %d\n",
+ AB5500_LED_FADE_CTRL, ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_disable(struct ab5500_hvleds *hvleds,
+ unsigned int led_id)
+{
+ int ret;
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id] - 1, 0);
+ ret |= abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id], 0);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmduty_reg[led_id], ret);
+
+ return ret;
+}
+
+static int ab5500_led_pwmduty_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, u16 val)
+{
+ int ret;
+ u8 val_lsb = val & 0xFF;
+ u8 val_msb = (val & 0x300) >> 8;
+
+ mutex_lock(&hvleds->lock);
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val = %d\n"
+ "reg[%d] w val = %d\n",
+ ab5500_led_pwmduty_reg[led_id] - 1, val_lsb,
+ ab5500_led_pwmduty_reg[led_id], val_msb);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id] - 1, val_lsb);
+ ret |= abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmduty_reg[led_id], val_msb);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmduty_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_pwmfreq_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, u8 val)
+{
+ int ret;
+
+ val = (val & 0x0F) << AB5500_LED_PWMFREQ_SHIFT;
+
+ mutex_lock(&hvleds->lock);
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val=%d\n",
+ ab5500_led_pwmfreq_reg[led_id], val);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_pwmfreq_reg[led_id], val);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_pwmfreq_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_sinkctl_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, u8 val)
+{
+ int ret;
+
+ if (val > AB5500_LED_SINKCURR_MAX)
+ val = AB5500_LED_SINKCURR_MAX;
+
+ val = (val << AB5500_LED_SINKCURR_SHIFT);
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val=%d\n",
+ ab5500_led_sinkctl_reg[led_id], val);
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_sinkctl_reg[led_id], val);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ ab5500_led_sinkctl_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_fade_write(struct ab5500_hvleds *hvleds,
+ unsigned int led_id, bool on, u16 val)
+{
+ int ret;
+ int val_lsb = val & 0xFF;
+ int val_msb = (val & 0x300) >> 8;
+ u8 *fade_reg;
+
+ if (on)
+ fade_reg = ab5500_led_fade_hi_reg;
+ else
+ fade_reg = ab5500_led_fade_lo_reg;
+
+ dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val = %d\n"
+ "reg[%d] w val = %d\n",
+ fade_reg[led_id] - 1, val_lsb,
+ fade_reg[led_id], val_msb);
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ fade_reg[led_id] - 1, val_lsb);
+ ret |= abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ fade_reg[led_id], val_msb);
+ if (ret < 0)
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ fade_reg[led_id], ret);
+
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static int ab5500_led_sinkctl_read(struct ab5500_hvleds *hvleds,
+ unsigned int led_id)
+{
+ int ret;
+ u8 val;
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_get_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ ab5500_led_sinkctl_reg[led_id], &val);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] r failed: %d\n",
+ ab5500_led_sinkctl_reg[led_id], ret);
+ mutex_unlock(&hvleds->lock);
+ return ret;
+ }
+
+ val = (val & 0xF0) >> AB5500_LED_SINKCURR_SHIFT;
+
+ mutex_unlock(&hvleds->lock);
+
+ return val;
+}
+
+static void ab5500_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct ab5500_led *led = to_led(led_cdev);
+
+ /* mask to LED_FULL and scale to the 10-bit intensity range */
+ brt_val &= LED_FULL;
+ led->brt_val = brt_val * AB5500_LED_INTENSITY_STEP;
+
+ schedule_work(&led->led_work);
+}
+
+static void ab5500_led_work(struct work_struct *led_work)
+{
+ struct ab5500_led *led = to_led(led_work);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (led->led_on == true) {
+ ab5500_led_pwmduty_write(hvleds, led->id, led->brt_val);
+ if (hvleds->hw_fade && led->brt_val) {
+ ab5500_led_enable(hvleds, led->id);
+ ab5500_led_start_manual(hvleds);
+ }
+ }
+}
+
+static ssize_t ab5500_led_show_current(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int led_curr = 0;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ led_curr = ab5500_led_sinkctl_read(hvleds, led->id);
+
+ if (led_curr < 0)
+ return led_curr;
+
+ return sprintf(buf, "%d\n", led_curr);
+}
+
+static ssize_t ab5500_led_store_current(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ int ret;
+ unsigned long led_curr;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (strict_strtoul(buf, 0, &led_curr))
+ return -EINVAL;
+
+ if (led_curr > led->max_current)
+ led_curr = led->max_current;
+
+ ret = ab5500_led_sinkctl_write(hvleds, led->id, led_curr);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static ssize_t ab5500_led_store_fade_auto(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ int ret;
+ u8 fade_ctrl = 0;
+ unsigned long fade_auto;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (strict_strtoul(buf, 0, &fade_auto))
+ return -EINVAL;
+
+ if (fade_auto > 1) {
+ dev_err(hvleds->dev, "invalid mode\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&hvleds->lock);
+
+ ret = abx500_get_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_FADE_CTRL, &fade_ctrl);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ AB5500_LED_FADE_CTRL, ret);
+ goto unlock_and_return;
+ }
+
+ /* manual mode */
+ if (fade_auto == false) {
+ fade_ctrl &= ~(AB5500_LED_FADE_ENABLE(led->id));
+ if (!(fade_ctrl & AB5500_FADE_ON_MASK))
+ fade_ctrl = 0;
+
+ ret = ab5500_led_disable(hvleds, led->id);
+ if (ret < 0)
+ goto unlock_and_return;
+ } else {
+ /* set led auto enable bit */
+ fade_ctrl |= AB5500_FADE_MODE_MASK;
+ fade_ctrl |= AB5500_LED_FADE_ENABLE(led->id);
+
+ /* set fade delay */
+ fade_ctrl &= ~AB5500_FADE_DELAY_MASK;
+ fade_ctrl |= hvleds->fade_delay << AB5500_FADE_DELAY_SHIFT;
+
+ /* set fade start manual */
+ fade_ctrl |= AB5500_FADE_START_MASK;
+
+ /* enable the corresponding led */
+ ret = ab5500_led_enable(hvleds, led->id);
+ if (ret < 0)
+ goto unlock_and_return;
+
+ }
+
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_FADE_CTRL, fade_ctrl);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ AB5500_LED_FADE_CTRL, ret);
+ goto unlock_and_return;
+ }
+
+ hvleds->fade_auto = fade_auto;
+
+ ret = len;
+
+unlock_and_return:
+ mutex_unlock(&hvleds->lock);
+
+ return ret;
+}
+
+static ssize_t ab5500_led_show_fade_auto(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ return sprintf(buf, "%d\n", hvleds->fade_auto);
+}
+
+static ssize_t ab5500_led_store_fade_delay(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ unsigned long fade_delay;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct ab5500_led *led = to_led(led_cdev);
+ struct ab5500_hvleds *hvleds = led_to_hvleds(led);
+
+ if (strict_strtoul(buf, 0, &fade_delay))
+ return -EINVAL;
+
+ if (fade_delay > AB5500_FADE_DELAY_TWOSEC) {
+ dev_err(hvleds->dev, "invalid mode\n");
+ return -EINVAL;
+ }
+
+ hvleds->fade_delay = fade_delay;
+
+ return len;
+}
+
+/* led class device attributes */
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO,
+ ab5500_led_show_current, ab5500_led_store_current);
+static DEVICE_ATTR(fade_auto, S_IRUGO | S_IWUGO,
+ ab5500_led_show_fade_auto, ab5500_led_store_fade_auto);
+static DEVICE_ATTR(fade_delay, S_IRUGO | S_IWUGO,
+ NULL, ab5500_led_store_fade_delay);
+
+static int ab5500_led_init_registers(struct ab5500_hvleds *hvleds)
+{
+ int ret = 0;
+ unsigned int led_id;
+
+ /* fade - manual : dur mid : pwm duty mid */
+ if (!hvleds->hw_fade) {
+ ret = abx500_set_register_interruptible(
+ hvleds->dev, AB5500_BANK_LED,
+ AB5500_LED_REG_ENABLE, true);
+ if (ret < 0) {
+ dev_err(hvleds->dev, "reg[%d] w failed: %d\n",
+ AB5500_LED_REG_ENABLE, ret);
+ return ret;
+ }
+ }
+
+ for (led_id = 0; led_id < AB5500_HVLEDS_MAX; led_id++) {
+ if (hvleds->leds[led_id].led_on == false)
+ continue;
+
+ ret = ab5500_led_sinkctl_write(
+ hvleds, led_id,
+ hvleds->leds[led_id].max_current);
+ if (ret < 0)
+ return ret;
+
+ if (hvleds->hw_fade) {
+ ret = ab5500_led_pwmfreq_write(
+ hvleds, led_id,
+ AB5500_LED_PWMFREQ_MAX / 2);
+ if (ret < 0)
+ return ret;
+
+ /* fade high intensity */
+ ret = ab5500_led_fade_write(
+ hvleds, led_id, true,
+ hvleds->leds[led_id].fade_hi);
+ if (ret < 0)
+ return ret;
+
+ /* fade low intensity */
+ ret = ab5500_led_fade_write(
+ hvleds, led_id, false,
+ hvleds->leds[led_id].fade_lo);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* init led off */
+ ret |= ab5500_led_pwmduty_write(
+ hvleds, led_id, AB5500_LED_INTENSITY_OFF);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ab5500_led_register_leds(struct device *dev,
+ struct ab5500_hvleds_platform_data *pdata,
+ struct ab5500_hvleds *hvleds)
+{
+ int i_led;
+ int ret = 0;
+ struct ab5500_led_conf *pled;
+ struct ab5500_led *led;
+
+ hvleds->dev = dev;
+ hvleds->pdata = pdata;
+
+ if (abx500_get_chip_id(dev) == AB5500_2_0)
+ hvleds->hw_fade = true;
+ else
+ hvleds->hw_fade = false;
+
+ for (i_led = 0; i_led < AB5500_HVLEDS_MAX; i_led++) {
+ pled = &pdata->leds[i_led];
+ led = &hvleds->leds[i_led];
+
+ INIT_WORK(&led->led_work, ab5500_led_work);
+
+ led->id = pled->led_id;
+ led->max_current = pled->max_current;
+ led->led_on = pled->led_on;
+ led->led_cdev.name = pled->name;
+ led->led_cdev.brightness_set = ab5500_led_brightness_set;
+
+ /* Provide interface only for enabled LEDs */
+ if (led->led_on == false)
+ continue;
+
+ if (hvleds->hw_fade) {
+ led->fade_hi = (pled->fade_hi & LED_FULL);
+ led->fade_hi *= AB5500_LED_INTENSITY_STEP;
+ led->fade_lo = (pled->fade_lo & LED_FULL);
+ led->fade_lo *= AB5500_LED_INTENSITY_STEP;
+ }
+
+ ret = led_classdev_register(dev, &led->led_cdev);
+ if (ret < 0) {
+ dev_err(dev, "Register led class failed: %d\n", ret);
+ goto bailout1;
+ }
+
+ ret = device_create_file(led->led_cdev.dev,
+ &dev_attr_led_current);
+ if (ret < 0) {
+ dev_err(dev, "sysfs device creation failed: %d\n", ret);
+ goto bailout2;
+ }
+
+ if (hvleds->hw_fade) {
+ ret = device_create_file(led->led_cdev.dev,
+ &dev_attr_fade_auto);
+ if (ret < 0) {
+ dev_err(dev, "sysfs device "
+ "creation failed: %d\n", ret);
+ goto bailout3;
+ }
+
+ ret = device_create_file(led->led_cdev.dev,
+ &dev_attr_fade_delay);
+ if (ret < 0) {
+ dev_err(dev, "sysfs device "
+ "creation failed: %d\n", ret);
+ goto bailout4;
+ }
+ }
+ }
+
+ return ret;
+ for (; i_led >= 0; i_led--) {
+ if (hvleds->leds[i_led].led_on == false)
+ continue;
+
+ if (hvleds->hw_fade) {
+ device_remove_file(hvleds->leds[i_led].led_cdev.dev,
+ &dev_attr_fade_delay);
+bailout4:
+ device_remove_file(hvleds->leds[i_led].led_cdev.dev,
+ &dev_attr_fade_auto);
+ }
+bailout3:
+ device_remove_file(hvleds->leds[i_led].led_cdev.dev,
+ &dev_attr_led_current);
+bailout2:
+ led_classdev_unregister(&hvleds->leds[i_led].led_cdev);
+bailout1:
+ cancel_work_sync(&hvleds->leds[i_led].led_work);
+ }
+ return ret;
+}
+
+static int __devinit ab5500_hvleds_probe(struct platform_device *pdev)
+{
+ struct ab5500_hvleds_platform_data *pdata = pdev->dev.platform_data;
+ struct ab5500_hvleds *hvleds = NULL;
+ int ret = 0, i;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data required\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ hvleds = kzalloc(sizeof(struct ab5500_hvleds), GFP_KERNEL);
+ if (hvleds == NULL) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ mutex_init(&hvleds->lock);
+
+ /* init leds data and register led_classdev */
+ ret = ab5500_led_register_leds(&pdev->dev, pdata, hvleds);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "leds registration failed\n");
+ goto err_out;
+ }
+
+ /* init device registers and set initial led current */
+ ret = ab5500_led_init_registers(hvleds);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "reg init failed: %d\n", ret);
+ goto err_reg_init;
+ }
+
+ if (hvleds->hw_fade)
+ dev_info(&pdev->dev, "v2 enabled\n");
+ else
+ dev_info(&pdev->dev, "v1 enabled\n");
+
+ return ret;
+
+err_reg_init:
+ for (i = 0; i < AB5500_HVLEDS_MAX; i++) {
+ struct ab5500_led *led = &hvleds->leds[i];
+
+ if (led->led_on == false)
+ continue;
+
+ device_remove_file(led->led_cdev.dev, &dev_attr_led_current);
+ if (hvleds->hw_fade) {
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_auto);
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_delay);
+ }
+ led_classdev_unregister(&led->led_cdev);
+ cancel_work_sync(&led->led_work);
+ }
+err_out:
+ kfree(hvleds);
+ return ret;
+}
+
+static int __devexit ab5500_hvleds_remove(struct platform_device *pdev)
+{
+ struct ab5500_hvleds *hvleds = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < AB5500_HVLEDS_MAX; i++) {
+ struct ab5500_led *led = &hvleds->leds[i];
+
+ if (led->led_on == false)
+ continue;
+
+ device_remove_file(led->led_cdev.dev, &dev_attr_led_current);
+ if (hvleds->hw_fade) {
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_auto);
+ device_remove_file(led->led_cdev.dev,
+ &dev_attr_fade_delay);
+ }
+ led_classdev_unregister(&led->led_cdev);
+ cancel_work_sync(&led->led_work);
+ }
+ kfree(hvleds);
+ return 0;
+}
+
+static struct platform_driver ab5500_hvleds_driver = {
+ .driver = {
+ .name = AB5500LED_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_hvleds_probe,
+ .remove = __devexit_p(ab5500_hvleds_remove),
+};
+
+static int __init ab5500_hvleds_module_init(void)
+{
+ return platform_driver_register(&ab5500_hvleds_driver);
+}
+
+static void __exit ab5500_hvleds_module_exit(void)
+{
+ platform_driver_unregister(&ab5500_hvleds_driver);
+}
+
+module_init(ab5500_hvleds_module_init);
+module_exit(ab5500_hvleds_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>");
+MODULE_DESCRIPTION("Driver for AB5500 HVLED");
+
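
The sysfs interface described in the driver's header comment can be exercised from user space as well as from the shell; a small sketch, where the led name "red" and the attribute values follow the examples in that comment:

	#include <stdio.h>

	/* blink the "red" led between LED_OFF and LED_FULL with a one
	 * second fade: set fade delay, then switch to auto mode */
	int main(void)
	{
		FILE *f;

		f = fopen("/sys/class/leds/red/fade_delay", "w");
		if (!f)
			return 1;
		fputs("2\n", f);	/* 1 sec soft scaling */
		fclose(f);

		f = fopen("/sys/class/leds/red/fade_auto", "w");
		if (!f)
			return 1;
		fputs("1\n", f);	/* enable auto blinking */
		fclose(f);
		return 0;
	}
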
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 968fd5fef4f..41aed6c89ab 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
+#include <linux/gpio.h>
#define LM3530_LED_DEV "lcd-backlight"
#define LM3530_NAME "lm3530-led"
@@ -101,6 +102,7 @@ static struct lm3530_mode_map mode_map[] = {
* @mode: mode of operation - manual, ALS, PWM
* @regulator: regulator
* @brighness: previous brightness value
+ * @hw_en_gpio: GPIO line for LM3530 HWEN
* @enable: regulator is enabled
*/
struct lm3530_data {
@@ -110,6 +112,7 @@ struct lm3530_data {
enum lm3530_mode mode;
struct regulator *regulator;
enum led_brightness brightness;
+ int hw_en_gpio;
bool enable;
};
@@ -151,7 +154,7 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
u8 als_imp_sel = 0;
u8 brightness;
u8 reg_val[LM3530_REG_MAX];
- u8 zones[LM3530_ALS_ZB_MAX];
+ u8 zones[LM3530_ALS_ZB_MAX] = {0};
u32 als_vmin, als_vmax, als_vstep;
struct lm3530_platform_data *pdata = drvdata->pdata;
struct i2c_client *client = drvdata->client;
@@ -230,6 +233,8 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
reg_val[13] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */
if (!drvdata->enable) {
+ if (drvdata->hw_en_gpio != LM3530_NO_HWEN_GPIO)
+ gpio_set_value(drvdata->hw_en_gpio, 1);
ret = regulator_enable(drvdata->regulator);
if (ret) {
dev_err(&drvdata->client->dev,
@@ -294,6 +299,8 @@ static void lm3530_brightness_set(struct led_classdev *led_cdev,
if (err)
dev_err(&drvdata->client->dev,
"Disable regulator failed\n");
+ if (drvdata->hw_en_gpio != LM3530_NO_HWEN_GPIO)
+ gpio_set_value(drvdata->hw_en_gpio, 0);
drvdata->enable = false;
}
break;
@@ -397,6 +404,7 @@ static int __devinit lm3530_probe(struct i2c_client *client,
drvdata->client = client;
drvdata->pdata = pdata;
drvdata->brightness = LED_OFF;
+ drvdata->hw_en_gpio = pdata->hw_en_gpio;
drvdata->enable = false;
drvdata->led_dev.name = LM3530_LED_DEV;
drvdata->led_dev.brightness_set = lm3530_brightness_set;
@@ -404,6 +412,15 @@ static int __devinit lm3530_probe(struct i2c_client *client,
i2c_set_clientdata(client, drvdata);
+ if (gpio_is_valid(drvdata->hw_en_gpio)) {
+ err = gpio_request_one(drvdata->hw_en_gpio, GPIOF_OUT_INIT_HIGH,
+ "lm3530_hw_en");
+ if (err < 0) {
+ dev_err(&client->dev, "lm3530 hw_en gpio failed: %d\n", err);
+ goto err_gpio_request;
+ }
+ }
+
drvdata->regulator = regulator_get(&client->dev, "vin");
if (IS_ERR(drvdata->regulator)) {
dev_err(&client->dev, "regulator get failed\n");
@@ -443,6 +460,10 @@ err_class_register:
err_reg_init:
regulator_put(drvdata->regulator);
err_regulator_get:
+ if (gpio_is_valid(drvdata->hw_en_gpio))
+ gpio_free(drvdata->hw_en_gpio);
+err_gpio_request:
+ i2c_set_clientdata(client, NULL);
kfree(drvdata);
err_out:
return err;
@@ -457,6 +478,8 @@ static int __devexit lm3530_remove(struct i2c_client *client)
if (drvdata->enable)
regulator_disable(drvdata->regulator);
regulator_put(drvdata->regulator);
+ if (gpio_is_valid(drvdata->hw_en_gpio))
+ gpio_free(drvdata->hw_en_gpio);
led_classdev_unregister(&drvdata->led_dev);
kfree(drvdata);
return 0;
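
Boards hand the HWEN line to the driver through platform data; a hedged sketch of that wiring (the GPIO number is an assumption, and LM3530_NO_HWEN_GPIO is the sentinel this series uses when HWEN is hard-wired high):

	static struct lm3530_platform_data example_lm3530_pdata = {
		/* ... mode, ALS and brightness fields as before ... */
		.hw_en_gpio = 77,	/* assumed board-specific GPIO; pass
					 * LM3530_NO_HWEN_GPIO if the HWEN
					 * pin is tied high in hardware */
	};
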
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 410a723b869..2f667071278 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -355,7 +355,12 @@ static int lp5521_do_store_load(struct lp5521_engine *engine,
while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
/* separate sscanfs because length is working only for %s */
ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
- if (ret != 2)
+ /*
+ * Execution of a %n directive does not always
+ * increment the assignment count returned at
+ * completion of execution, so ret need not be 2.
+ */
+ if ((ret != 1) && (ret != 2))
goto fail;
ret = sscanf(c, "%2x", &cmd);
if (ret != 1)
@@ -787,6 +792,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
if (buf != LP5521_REG_R_CURR_DEFAULT) {
dev_err(&client->dev, "error in resetting chip\n");
+ ret = -EIO;
goto fail2;
}
usleep_range(10000, 20000);
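
The %n caveat patched above is easy to reproduce: C99 says a %n directive does not increment the assignment count returned by sscanf(), but implementations have differed, which is why both 1 and 2 must be accepted. A standalone sketch:

	#include <stdio.h>

	int main(void)
	{
		char c[3];
		int nrchars = 0;
		int ret;

		ret = sscanf("4d ", "%2s%n ", c, &nrchars);
		/* a conforming libc prints ret=1 nrchars=2 token=4d;
		 * some implementations have returned ret=2 instead */
		printf("ret=%d nrchars=%d token=%s\n", ret, nrchars, c);
		return 0;
	}
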
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 3ed92f34bd4..9c5f7136b37 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -27,9 +27,60 @@ struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
unsigned int active_low;
+ unsigned int lth_brightness;
unsigned int period;
+ unsigned int dutycycle_steps;
+ unsigned int period_steps;
};
+static int led_pwm_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct led_pwm_data *led_dat =
+ container_of(led_cdev, struct led_pwm_data, cdev);
+ int dutycycle_ms, period_sec;
+ int dutycycle, period;
+ /*
+ * If both delays are zero, fall back to a sensible default
+ */
+ if (*delay_on == 0 && *delay_off == 0) {
+ *delay_on = 500;
+ *delay_off = 500;
+ }
+ /*
+ * calculate the duty cycle from on and off time
+ */
+ dutycycle_ms = ((*delay_on * 1000)/(*delay_on + *delay_off));
+ /*
+ * convert the calculated value into the PWM output register format
+ */
+ if (led_dat->dutycycle_steps)
+ dutycycle = ((dutycycle_ms * led_dat->dutycycle_steps)/1000);
+ else
+ dutycycle = (dutycycle_ms/1000);
+ /*
+ * calculate period from on and off time(msec)
+ */
+ period_sec = ((*delay_on + *delay_off)/1000);
+ /*
+ * convert the calculated value into the PWM output register format
+ */
+ if (led_dat->period_steps) {
+ if ((*delay_on + *delay_off) == 500)
+ period = led_dat->period_steps;
+ else
+ period = led_dat->period_steps - period_sec;
+ } else {
+ period = period_sec;
+ }
+ /*
+ * configure the PWM registers and enable blink functionality
+ */
+ pwm_config_blink(led_dat->pwm, dutycycle, period);
+ pwm_blink_ctrl(led_dat->pwm, 1);
+ return 0;
+}
+
static void led_pwm_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
@@ -42,7 +93,10 @@ static void led_pwm_set(struct led_classdev *led_cdev,
pwm_config(led_dat->pwm, 0, period);
pwm_disable(led_dat->pwm);
} else {
- pwm_config(led_dat->pwm, brightness * period / max, period);
+ brightness = led_dat->lth_brightness + (brightness *
+ (led_dat->period - led_dat->lth_brightness) / max);
+ pwm_config(led_dat->pwm, brightness, led_dat->period);
+
pwm_enable(led_dat->pwm);
}
}
@@ -79,8 +133,13 @@ static int led_pwm_probe(struct platform_device *pdev)
led_dat->cdev.default_trigger = cur_led->default_trigger;
led_dat->active_low = cur_led->active_low;
led_dat->period = cur_led->pwm_period_ns;
+ led_dat->lth_brightness = cur_led->lth_brightness *
+ (cur_led->pwm_period_ns / cur_led->max_brightness);
+ led_dat->dutycycle_steps = cur_led->dutycycle_steps;
+ led_dat->period_steps = cur_led->period_steps;
led_dat->cdev.brightness_set = led_pwm_set;
led_dat->cdev.brightness = LED_OFF;
+ led_dat->cdev.blink_set = led_pwm_blink_set;
led_dat->cdev.max_brightness = cur_led->max_brightness;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
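
The arithmetic in led_pwm_blink_set() is easiest to follow with numbers; note that despite its _ms suffix, dutycycle_ms is actually per-mille. A standalone sketch with assumed delays and step counts:

	#include <stdio.h>

	int main(void)
	{
		/* assumed: 300 ms on, 700 ms off, 256 duty-cycle steps */
		unsigned long delay_on = 300, delay_off = 700;
		unsigned int dutycycle_steps = 256;

		int dutycycle_ms = (delay_on * 1000) / (delay_on + delay_off);
		int dutycycle = (dutycycle_ms * dutycycle_steps) / 1000;
		int period_sec = (delay_on + delay_off) / 1000;

		/* prints 300 76 1 */
		printf("%d %d %d\n", dutycycle_ms, dutycycle, period_sec);
		return 0;
	}
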
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 11e44386fa9..825673af5f3 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -331,6 +331,17 @@ config MFD_TC3589X
additional drivers must be enabled in order to use the
functionality of the device.
+config MFD_TC35892
+ bool "Support Toshiba TC35892"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ Support for the Toshiba TC35892 I/O Expander.
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
config MFD_TMIO
bool
default n
@@ -359,6 +370,27 @@ config MFD_TC6393XB
help
Support for Toshiba Mobile IO Controller TC6393XB
+config AB5500_CORE
+ bool "ST-Ericsson AB5500 Mixed Signal Circuit core functions"
+ select MFD_CORE
+ depends on GENERIC_HARDIRQS && ABX500_CORE
+ help
+ Select this to enable the AB5500 Mixed Signal IC core
+ functionality. This connects to an AB5500 chip on the I2C bus via
+ the Power and Reset Management Unit (PRCMU). It exposes a number
+ of symbols needed for dependent devices to read and write
+ registers and subscribe to events from this multi-functional IC.
+ This is needed to use other features of the AB5500 such as
+ battery-backed RTC, charging control, regulators, LEDs, vibrator,
+ system power and temperature, power management and ALSA sound.
+
+config AB5500_GPADC
+ bool "AB5500 GPADC driver"
+ depends on AB5500_CORE
+ default y
+ help
+ AB5500 GPADC driver used to convert battery/usb voltage.
+
config PMIC_DA903X
bool "Dialog Semiconductor DA9030/DA9034 PMIC Support"
depends on I2C=y
@@ -678,7 +710,7 @@ config AB8500_CORE
config AB8500_I2C_CORE
bool "AB8500 register access via PRCMU I2C"
- depends on AB8500_CORE && MFD_DB8500_PRCMU
+ depends on AB8500_CORE
default y
help
This enables register access to the AB8500 chip via PRCMU I2C.
@@ -686,6 +718,14 @@ config AB8500_I2C_CORE
the I2C bus is connected to the Power Reset
and Management Unit, PRCMU.
+config AB8500_DENC
+ bool "AB8500_DENC driver support(CVBS)"
+ depends on AB8500_CORE
+ help
+ Select this option to add driver support for analog TV out through
+ AB8500.
+
config AB8500_DEBUG
bool "Enable debug info via debugfs"
depends on AB8500_CORE && DEBUG_FS
@@ -696,10 +736,10 @@ config AB8500_DEBUG
config AB8500_GPADC
bool "AB8500 GPADC driver"
- depends on AB8500_CORE && REGULATOR_AB8500
+ depends on AB8500_CORE
default y
help
- AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage
+ AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage.
config MFD_DB8500_PRCMU
bool "ST-Ericsson DB8500 Power Reset Control Management Unit"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 05fa538c5ef..cfed0402931 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -2,6 +2,7 @@
# Makefile for multifunction miscellaneous devices
#
+obj-$(CONFIG_AB5500_CORE) += ab5500-core.o ab5500-power.o
88pm860x-objs := 88pm860x-core.o 88pm860x-i2c.o
obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o
obj-$(CONFIG_MFD_SM501) += sm501.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_MFD_STMPE) += stmpe.o
obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o
obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o
obj-$(CONFIG_MFD_TC3589X) += tc3589x.o
+obj-$(CONFIG_MFD_TC35892) += tc35892.o
obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
@@ -91,11 +93,13 @@ obj-$(CONFIG_AB5500_CORE) += ab5500-core.o
obj-$(CONFIG_AB5500_DEBUG) += ab5500-debugfs.o
obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
+obj-$(CONFIG_AB8500_DENC) += ab8500-denc.o
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
# ab8500-i2c need to come after db8500-prcmu (which provides the channel)
obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
obj-$(CONFIG_MFD_DB5500_PRCMU) += db5500-prcmu.o
+obj-$(CONFIG_AB5500_GPADC) += ab5500-gpadc.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c
index 54d0fe40845..e8ae5d945e4 100644
--- a/drivers/mfd/ab5500-core.c
+++ b/drivers/mfd/ab5500-core.c
@@ -991,6 +991,74 @@ static struct mfd_cell ab5500_devs[AB5500_NUM_DEVICES] = {
},
},
},
+ [AB5500_DEVID_TEMPMON] = {
+ .name = "abx500-temp",
+ .id = AB5500_DEVID_TEMPMON,
+ .num_resources = 1,
+ .resources = (struct resource[]) {
+ {
+ .name = "ABX500_TEMP_WARM",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 2),
+ .end = AB5500_IRQ(2, 2),
+ },
+ },
+ },
+ [AB5500_DEVID_ACCDET] = {
+ .name = "ab5500-acc-det",
+ .id = AB5500_DEVID_ACCDET,
+ .num_resources = 8,
+ .resources = (struct resource[]) {
+ {
+ .name = "acc_detedt22db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 7),
+ .end = AB5500_IRQ(2, 7),
+ },
+ {
+ .name = "acc_detedt21db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 6),
+ .end = AB5500_IRQ(2, 6),
+ },
+ {
+ .name = "acc_detedt21db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(2, 5),
+ .end = AB5500_IRQ(2, 5),
+ },
+ {
+ .name = "acc_detedt3db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 4),
+ .end = AB5500_IRQ(3, 4),
+ },
+ {
+ .name = "acc_detedt3db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 3),
+ .end = AB5500_IRQ(3, 3),
+ },
+ {
+ .name = "acc_detedt1db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 2),
+ .end = AB5500_IRQ(3, 2),
+ },
+ {
+ .name = "acc_detedt1db_rising",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 1),
+ .end = AB5500_IRQ(3, 1),
+ },
+ {
+ .name = "acc_detedt22db_falling",
+ .flags = IORESOURCE_IRQ,
+ .start = AB5500_IRQ(3, 0),
+ .end = AB5500_IRQ(3, 0),
+ },
+ },
+ },
};
/*
@@ -1301,6 +1369,10 @@ static const struct ab_family_id ids[] __initdata = {
.id = AB5500_1_1,
.name = "1.1"
},
+ {
+ .id = AB5500_2_0,
+ .name = "2.0"
+ },
/* Terminator */
{
.id = 0x00,
diff --git a/drivers/mfd/ab5500-gpadc.c b/drivers/mfd/ab5500-gpadc.c
new file mode 100644
index 00000000000..fe05ffbadfd
--- /dev/null
+++ b/drivers/mfd/ab5500-gpadc.c
@@ -0,0 +1,1256 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Vijaya Kumar K <vijay.kilari@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+
+/*
+ * Manual mode ADC registers
+ */
+#define AB5500_GPADC_MANUAL_STAT_REG 0x1F
+#define AB5500_GPADC_MANDATAL_REG 0x21
+#define AB5500_GPADC_MANDATAH_REG 0x20
+#define AB5500_GPADC_MANUAL_MUX_CTRL 0x22
+#define AB5500_GPADC_MANUAL_MODE_CTRL 0x23
+#define AB5500_GPADC_MANUAL_MODE_CTRL2 0x24
+/*
+ * Auto/Polling mode ADC registers
+ */
+#define AB5500_GPADC_AUTO_VBAT_MAX 0x26
+#define AB5500_GPADC_AUTO_VBAT_MIN_TXON 0x27
+#define AB5500_GPADC_AUTO_VBAT_MIN_NOTX 0x28
+#define AB5500_GPADC_AUTO_VBAT_AVGH 0x29
+#define AB5500_GPADC_AUTO_VBAT_AVGL 0x2A
+#define AB5500_GPADC_AUTO_ICHAR_MAX 0x2B
+#define AB5500_GPADC_AUTO_ICHAR_MIN 0x2C
+#define AB5500_GPADC_AUTO_ICHAR_AVG 0x2D
+#define AB5500_GPADC_AUTO_CTRL2 0x2F
+#define AB5500_GPADC_AUTO_CTRL1 0x30
+#define AB5500_GPADC_AUTO_PWR_CTRL 0x31
+#define AB5500_GPADC_AUTO_TRIG_VBAT_MIN_TXON 0x32
+#define AB5500_GPADC_AUTO_TRIG_VBAT_MIN_NOTX 0x33
+#define AB5500_GPADC_AUTO_TRIG_ADOUT0_CTRL 0x34
+#define AB5500_GPADC_AUTO_TRIG_ADOUT1_CTRL 0x35
+#define AB5500_GPADC_AUTO_TRIG0_MUX_CTRL 0x37
+#define AB5500_GPADC_AUTO_XTALTEMP_CTRL 0x57
+#define AB5500_GPADC_KELVIN_CTRL 0xFE
+
+/* gpadc constants */
+#define AB5500_INT_ADC_TRIG0 0x0
+#define AB5500_INT_ADC_TRIG1 0x1
+#define AB5500_INT_ADC_TRIG2 0x2
+#define AB5500_INT_ADC_TRIG3 0x3
+#define AB5500_INT_ADC_TRIG4 0x4
+#define AB5500_INT_ADC_TRIG5 0x5
+#define AB5500_INT_ADC_TRIG6 0x6
+#define AB5500_INT_ADC_TRIG7 0x7
+
+#define AB5500_GPADC_AUTO_TRIG_INDEX AB5500_GPADC_AUTO_TRIG0_MUX_CTRL
+#define GPADC_MANUAL_READY 0x01
+#define GPADC_MANUAL_ADOUT0_MASK 0x30
+#define GPADC_MANUAL_ADOUT1_MASK 0xC0
+#define GPADC_MANUAL_ADOUT0_ON 0x10
+#define GPADC_MANUAL_ADOUT1_ON 0x40
+#define MUX_SCALE_GPADC0_MASK 0x08
+#define MUX_SCALE_VBAT_MASK 0x02
+#define MUX_SCALE_45 0x02
+#define MUX_SCALE_BDATA_MASK 0x01
+#define MUX_SCALE_BDATA27 0x00
+#define MUX_SCALE_BDATA18 0x01
+#define MUX_SCALE_ACCDET2_MASK 0x01
+#define MUX_SCALE_ACCDET3_MASK 0x02
+#define GPADC0_SCALE_VOL27 0x00
+#define GPADC0_SCALE_VOL18 0x01
+#define ACCDET2_SCALE_VOL27 0x00
+#define ACCDET3_SCALE_VOL27 0x00
+#define TRIGX_FREQ_MASK 0x07
+#define AUTO_VBAT_MASK 0x10
+#define AUTO_VBAT_ON 0x10
+#define TRIG_VBAT_TXON_ARM_MASK 0x08
+#define TRIG_VBAT_NOTX_ARM_MASK 0x04
+#define TRIGX_ARM_MASK 0x20
+#define TRIGX_ARM 0x20
+#define TRIGX_MUX_SELECT 0x1F
+#define ADC_CAL_OFF_MASK 0x04
+#define ADC_ON_MODE_MASK 0x03
+#define ADC_CAL_ON 0x00
+#define ADC_FULLPWR 0x03
+#define ADC_XTAL_FORCE_MASK 0x80
+#define ADC_XTAL_FORCE_EN 0x80
+#define ADC_XTAL_FORCE_DI 0x00
+#define ADOUT0 0x01
+#define ADOUT1 0x02
+#define MIN_INDEX 0x02
+#define MAX_INDEX 0x03
+#define CTRL_INDEX 0x01
+#define KELVIN_SCALE_VOL45 0x00
+
+/* GPADC constants from AB5500 spec */
+#define GPADC0_MIN 0
+#define GPADC0_MAX 1800
+#define BTEMP_MIN 0
+#define BTEMP_MAX 1800
+#define BDATA_MIN 0
+#define BDATA_MAX 2750
+#define PCBTEMP_MIN 0
+#define PCBTEMP_MAX 1800
+#define XTALTEMP_MIN 0
+#define XTALTEMP_MAX 1800
+#define DIETEMP_MIN 0
+#define DIETEMP_MAX 1800
+#define VBUS_I_MIN 0
+#define VBUS_I_MAX 1600
+#define VBUS_V_MIN 0
+#define VBUS_V_MAX 20000
+#define ACCDET2_MIN 0
+#define ACCDET2_MAX 2500
+#define ACCDET3_MIN 0
+#define ACCDET3_MAX 2500
+#define VBAT_MIN 2300
+#define VBAT_MAX 4500
+#define BKBAT_MIN 0
+#define BKBAT_MAX 2750
+#define USBID_MIN 0
+#define USBID_MAX 1800
+#define KELVIN_MIN 0
+#define KELVIN_MAX 4500
+#define VIBRA_MIN 0
+#define VIBRA_MAX 4500
+
+/* This is used for calibration */
+#define ADC_RESOLUTION 1023
+#define AUTO_ADC_RESOLUTION 255
+
+/* ADOUT prestart time */
+#define ADOUT0_TRIGX_PRESTART 0x18
+
+enum adc_auto_channels {
+ ADC_INPUT_TRIG0 = 0,
+ ADC_INPUT_TRIG1,
+ ADC_INPUT_TRIG2,
+ ADC_INPUT_TRIG3,
+ ADC_INPUT_TRIG4,
+ ADC_INPUT_TRIG5,
+ ADC_INPUT_TRIG6,
+ ADC_INPUT_TRIG7,
+ ADC_INPUT_VBAT_TXOFF,
+ ADC_INPUT_VBAT_TXON,
+ N_AUTO_TRIGGER
+};
+
+/**
+ * struct adc_auto_trigger - AB5500 GPADC auto trigger
+ * @auto_mux: mux input
+ * @flag: status of trigger
+ * @freq: frequency of conversion
+ * @adout: ADOUT to pull
+ * @trig_min: trigger minimum value
+ * @trig_max: trigger maximum value
+ * @auto_callb: notification callback
+ */
+struct adc_auto_trigger {
+ u8 auto_mux;
+ u8 flag;
+ u8 freq;
+ u8 adout;
+ u8 trig_min;
+ u8 trig_max;
+ int (*auto_callb)(int mux);
+};
+
+/**
+ * struct ab5500_adc_interrupts - ab5500 adc interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab5500_adc_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+/**
+ * struct ab5500_gpadc - AB5500 GPADC device information
+ * @chip_id: ABB chip id
+ * @dev: pointer to the struct device
+ * @node: a list of AB5500 GPADCs, hence prepared for reentrance
+ * @ab5500_gpadc_lock: mutex serializing manual conversions
+ * @regu: pointer to the struct regulator
+ * @irq: interrupt number that is used by gpadc
+ * @prev_bdata: previous valid conversion result for the BDATA inputs
+ * @gpadc_auto_lock: spinlock protecting the auto trigger state
+ * @adc_trig: array of auto trigger channels
+ * @gpadc_wq: work queue for the trigger work items
+ * @gpadc_trigX_work: work items for the trigger channels
+ */
+struct ab5500_gpadc {
+ u8 chip_id;
+ struct device *dev;
+ struct list_head node;
+ struct mutex ab5500_gpadc_lock;
+ struct regulator *regu;
+ int irq;
+ int prev_bdata;
+ spinlock_t gpadc_auto_lock;
+ struct adc_auto_trigger adc_trig[N_AUTO_TRIGGER];
+ struct workqueue_struct *gpadc_wq;
+ struct work_struct gpadc_trig0_work;
+ struct work_struct gpadc_trig1_work;
+ struct work_struct gpadc_trig2_work;
+ struct work_struct gpadc_trig3_work;
+ struct work_struct gpadc_trig4_work;
+ struct work_struct gpadc_trig5_work;
+ struct work_struct gpadc_trig6_work;
+ struct work_struct gpadc_trig7_work;
+ struct work_struct gpadc_trig_vbat_txon_work;
+ struct work_struct gpadc_trig_vbat_txoff_work;
+};
+
+static LIST_HEAD(ab5500_gpadc_list);
+
+struct adc_data {
+ u8 mux;
+ int min;
+ int max;
+ int adout;
+};
+
+#define ADC_DATA(_id, _mux, _min, _max, _adout) \
+ [_id] = { \
+ .mux = _mux, \
+ .min = _min, \
+ .max = _max, \
+ .adout = _adout \
+ }
+
+struct adc_data adc_tab[] = {
+ ADC_DATA(GPADC0_V, 0x00, GPADC0_MIN, GPADC0_MAX, 0),
+ ADC_DATA(BTEMP_BALL, 0x0D, BTEMP_MIN, BTEMP_MAX, ADOUT0),
+ ADC_DATA(BAT_CTRL, 0x0D, BDATA_MIN, BDATA_MAX, 0),
+ ADC_DATA(MAIN_BAT_V, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+ ADC_DATA(MAIN_BAT_V_TXON, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+ ADC_DATA(VBUS_V, 0x10, VBUS_V_MIN, VBUS_V_MAX, 0),
+ ADC_DATA(USB_CHARGER_C, 0x0A, VBUS_I_MIN, VBUS_I_MAX, 0),
+ ADC_DATA(BK_BAT_V, 0x07, BKBAT_MIN, BKBAT_MAX, 0),
+ ADC_DATA(DIE_TEMP, 0x0F, DIETEMP_MIN, DIETEMP_MAX, ADOUT0),
+ ADC_DATA(PCB_TEMP, 0x13, PCBTEMP_MIN, PCBTEMP_MAX, ADOUT0),
+ ADC_DATA(XTAL_TEMP, 0x06, XTALTEMP_MIN, XTALTEMP_MAX, ADOUT0),
+ ADC_DATA(USB_ID, 0x1A, USBID_MIN, USBID_MAX, 0),
+ ADC_DATA(ACC_DETECT2, 0x18, ACCDET2_MIN, ACCDET2_MAX, 0),
+ ADC_DATA(ACC_DETECT3, 0x19, ACCDET3_MIN, ACCDET3_MAX, 0),
+ ADC_DATA(MAIN_BAT_V_TRIG_MIN, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+ ADC_DATA(MAIN_BAT_V_TXON_TRIG_MIN, 0x0C, VBAT_MIN, VBAT_MAX, 0),
+ ADC_DATA(VIBRA_KELVIN, 0x16, VIBRA_MIN, VIBRA_MAX, 0),
+};
+
+/**
+ * ab5500_gpadc_get() - return a reference to the AB5500 GPADC whose
+ * device name matches @name
+ * @name: device name of the GPADC to look up
+ */
+struct ab5500_gpadc *ab5500_gpadc_get(const char *name)
+{
+ struct ab5500_gpadc *gpadc;
+ list_for_each_entry(gpadc, &ab5500_gpadc_list, node) {
+ if (!strcmp(name, dev_name(gpadc->dev)))
+ return gpadc;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(ab5500_gpadc_get);
+
+#define CONV(min, max, x)\
+ ((min) + ((((max)-(min))*(x))/ADC_RESOLUTION))
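+/* e.g. for VBAT (2300..4500 mV) a raw code of 512 gives 2300 + (2200 * 512) / 1023 = 3401 mV */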
+
+static int ab5500_gpadc_ad_to_voltage(struct ab5500_gpadc *gpadc,
+ u8 in, u16 ad_val)
+{
+ int res;
+
+ switch (in) {
+ case VIBRA_KELVIN:
+ case GPADC0_V:
+ case PCB_TEMP:
+ case BTEMP_BALL:
+ case MAIN_BAT_V:
+ case MAIN_BAT_V_TXON:
+ case ACC_DETECT2:
+ case ACC_DETECT3:
+ case VBUS_V:
+ case USB_CHARGER_C:
+ case BK_BAT_V:
+ case XTAL_TEMP:
+ case USB_ID:
+ case BAT_CTRL:
+ res = CONV(adc_tab[in].min, adc_tab[in].max, ad_val);
+ break;
+ case DIE_TEMP:
+ /*
+ * From the AB5500 product specification
+ * T(deg cel) = 27 - ((ADCode - 709)/2.4213)
+ * 27 + 709/2.4213 - ADCode/2.4213
+ * 320 - (ADCode/2.4213)
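+ * e.g. ADCode = 709 gives 320 - 292 = 28 with the integer scaling
+ * below, i.e. roughly the 27 deg C of the spec formula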
+ */
+ res = 320 - (((unsigned long)ad_val * 10000) / 24213);
+ break;
+ default:
+ dev_err(gpadc->dev,
+ "unknown channel, not possible to convert\n");
+ res = -EINVAL;
+ break;
+ }
+ return res;
+}
+
+/**
+ * ab5500_gpadc_convert() - gpadc conversion
+ * @gpadc: pointer to the gpadc device information
+ * @input: analog input to be converted to digital data
+ *
+ * This function converts the selected analog input to digital
+ * data in manual mode.
+ */
+int ab5500_gpadc_convert(struct ab5500_gpadc *gpadc, u8 input)
+{
+ int result, ret = -EINVAL;
+ u16 data = 0;
+ u8 looplimit = 0;
+ u8 status = 0;
+ u8 low_data, high_data, adout_mask, adout_val;
+
+ if (!gpadc)
+ return -ENODEV;
+
+ mutex_lock(&gpadc->ab5500_gpadc_lock);
+
+ switch (input) {
+ case MAIN_BAT_V:
+ case MAIN_BAT_V_TXON:
+ /*
+ * The mux scale voltage depends on the type of battery:
+ * for Li-ion use MUX_SCALE_35 => 2.3-3.5V
+ * for LiFePO4 use MUX_SCALE_45 => 2.3-4.5V
+ * Check type of battery from platform data TODO ???
+ */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_VBAT_MASK, MUX_SCALE_45);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to read status\n");
+ goto out;
+ }
+ break;
+ case BTEMP_BALL:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_BDATA_MASK, MUX_SCALE_BDATA18);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set mux scale\n");
+ goto out;
+ }
+ break;
+ case BAT_CTRL:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_BDATA_MASK, MUX_SCALE_BDATA27);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set mux scale\n");
+ goto out;
+ }
+ break;
+ case XTAL_TEMP:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_XTALTEMP_CTRL,
+ ADC_XTAL_FORCE_MASK, ADC_XTAL_FORCE_EN);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set xtaltemp\n");
+ goto out;
+ }
+ break;
+ case GPADC0_V:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_GPADC0_MASK, GPADC0_SCALE_VOL18);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set gpadc0\n");
+ goto out;
+ }
+ break;
+ case ACC_DETECT2:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL2,
+ MUX_SCALE_ACCDET2_MASK, ACCDET2_SCALE_VOL27);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set accdet2\n");
+ goto out;
+ }
+ break;
+ case ACC_DETECT3:
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL2,
+ MUX_SCALE_ACCDET3_MASK, ACCDET3_SCALE_VOL27);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set accdet3\n");
+ goto out;
+ }
+ break;
+ case VIBRA_KELVIN:
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_KELVIN_CTRL,
+ KELVIN_SCALE_VOL45);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set kelv scale\n");
+ goto out;
+ }
+ break;
+ case USB_CHARGER_C:
+ case VBUS_V:
+ case BK_BAT_V:
+ case USB_ID:
+ case PCB_TEMP:
+ case DIE_TEMP:
+ break;
+ default:
+ dev_err(gpadc->dev, "gpadc: Wrong adc\n");
+ goto out;
+ }
+ if (adc_tab[input].adout) {
+ adout_mask = adc_tab[input].adout == ADOUT0 ?
+ GPADC_MANUAL_ADOUT0_MASK : GPADC_MANUAL_ADOUT1_MASK;
+ adout_val = adc_tab[input].adout == ADOUT0 ?
+ GPADC_MANUAL_ADOUT0_ON : GPADC_MANUAL_ADOUT1_ON;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ adout_mask, adout_val);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to set ADOUT\n");
+ goto out;
+ }
+ /* delay required to ramp up voltage on BDATA node */
+ usleep_range(10000, 12000);
+ }
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_MANUAL_MUX_CTRL, adc_tab[input].mux);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: fail to trigger manual conv\n");
+ goto out;
+ }
+ /* wait for completion of conversion */
+ looplimit = 0;
+ do {
+ msleep(1);
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_STAT_REG,
+ &status);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to read status\n");
+ goto out;
+ }
+ if (status & GPADC_MANUAL_READY)
+ break;
+ } while (++looplimit < 2);
+ if (looplimit >= 2) {
+ dev_err(gpadc->dev, "timeout:failed to complete conversion\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Disable ADOUT for measurement
+ */
+ if (adc_tab[input].adout) {
+ adout_mask = adc_tab[input].adout == ADOUT0 ?
+ GPADC_MANUAL_ADOUT0_MASK : GPADC_MANUAL_ADOUT1_MASK;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ adout_mask, 0x0);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to disable ADOUT\n");
+ goto out;
+ }
+ }
+ /*
+ * Disable XTAL TEMP
+ */
+ if (input == XTAL_TEMP) {
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_XTALTEMP_CTRL,
+ ADC_XTAL_FORCE_MASK, ADC_XTAL_FORCE_DI);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: fail to disable xtaltemp\n");
+ goto out;
+ }
+ }
+ /* Read the converted RAW data */
+ ret = abx500_get_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_MANDATAL_REG, &low_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: read low data failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_MANDATAH_REG, &high_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: read high data failed\n");
+ goto out;
+ }
+
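+ /* assemble the 10-bit result: 8 MSBs from DATAH, 2 LSBs from DATAL[7:6] */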
+ data = (high_data << 2) | (low_data >> 6);
+ if (input == BAT_CTRL || input == BTEMP_BALL) {
+ /*
+ * TODO: Re-check with h/w team
+ * discard null or value < 5, as there is some error
+ * in conversion
+ */
+ if (data < 5)
+ data = gpadc->prev_bdata;
+ else
+ gpadc->prev_bdata = data;
+ }
+ result = ab5500_gpadc_ad_to_voltage(gpadc, input, data);
+
+ mutex_unlock(&gpadc->ab5500_gpadc_lock);
+ return result;
+
+out:
+ mutex_unlock(&gpadc->ab5500_gpadc_lock);
+ dev_err(gpadc->dev,
+ "gpadc: Failed to AD convert channel %d\n", input);
+ return ret;
+}
+EXPORT_SYMBOL(ab5500_gpadc_convert);
+
+/**
+ * ab5500_gpadc_program_auto() - program an auto conversion trigger
+ * @gpadc: pointer to the gpadc device information
+ * @trig: trigger channel to program
+ *
+ * This function programs the auto trigger channel.
+ */
+static int ab5500_gpadc_program_auto(struct ab5500_gpadc *gpadc, int trig)
+{
+ int ret;
+ u8 adout;
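+ /*
+ * Each trigger channel uses a block of four consecutive registers:
+ * +0 mux/arm, +CTRL_INDEX ctrl, +MIN_INDEX min, +MAX_INDEX max,
+ * hence the (trig << 2) offsets below.
+ */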
+
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + MIN_INDEX,
+ gpadc->adc_trig[trig].trig_min);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program min\n");
+ return ret;
+ }
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + MAX_INDEX,
+ gpadc->adc_trig[trig].trig_max);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program max\n");
+ return ret;
+ }
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2),
+ TRIGX_MUX_SELECT, gpadc->adc_trig[trig].auto_mux);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to select mux\n");
+ return ret;
+ }
+ if (gpadc->adc_trig[trig].adout) {
+ adout = gpadc->adc_trig[trig].adout == ADOUT0 ?
+ gpadc->adc_trig[trig].adout << 6 :
+ gpadc->adc_trig[trig].adout << 5;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + CTRL_INDEX,
+ adout, adout);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program adout\n");
+ return ret;
+ }
+ }
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + CTRL_INDEX,
+ TRIGX_FREQ_MASK, gpadc->adc_trig[trig].freq);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program freq\n");
+ return ret;
+ }
+ return ret;
+
+}
+
+#define TRIG_V(trigval, min, max) \
+ ((((trigval) - (min)) * AUTO_ADC_RESOLUTION) / ((max) - (min)))
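+/* e.g. 3000 mV on VBAT (2300..4500 mV) maps to ((3000 - 2300) * 255) / 2200 = 81 */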
+
+static int ab5500_gpadc_vbat_auto_conf(struct ab5500_gpadc *gpadc,
+ struct adc_auto_input *in)
+{
+ int trig_min, ret;
+ u8 trig_reg, trig_arm;
+
+ /* Scale mux voltage */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_VBAT_MASK, MUX_SCALE_45);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to set vbat scale\n");
+ return ret;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_CTRL1,
+ AUTO_VBAT_MASK, AUTO_VBAT_ON);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to set vbat on\n");
+ return ret;
+ }
+
+ trig_min = TRIG_V(in->min, adc_tab[in->mux].min, adc_tab[in->mux].max);
+
+ if (in->mux == MAIN_BAT_V_TRIG_MIN) {
+ trig_reg = AB5500_GPADC_AUTO_TRIG_VBAT_MIN_NOTX;
+ trig_arm = TRIG_VBAT_NOTX_ARM_MASK;
+ } else {
+ trig_reg = AB5500_GPADC_AUTO_TRIG_VBAT_MIN_TXON;
+ trig_arm = TRIG_VBAT_TXON_ARM_MASK;
+ }
+ ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC,
+ trig_reg, trig_min);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to program vbat min\n");
+ return ret;
+ }
+ /*
+ * arm the trigger
+ */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_CTRL1, trig_arm, trig_arm);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: failed to trig vbat\n");
+ return ret;
+ }
+ return ret;
+}
+/**
+ * ab5500_gpadc_convert_auto() - gpadc auto conversion
+ * @gpadc: pointer to the gpadc device information
+ * @in: input trigger for conversion
+ *
+ * This function converts the selected channel from
+ * analog to digital data in auto mode.
+ */
+int ab5500_gpadc_convert_auto(struct ab5500_gpadc *gpadc,
+ struct adc_auto_input *in)
+{
+ int ret, trig;
+ unsigned long flags;
+
+ if (!gpadc)
+ return -ENODEV;
+ mutex_lock(&gpadc->ab5500_gpadc_lock);
+
+ if (in->mux == MAIN_BAT_V_TXON_TRIG_MIN) {
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ if (gpadc->adc_trig[ADC_INPUT_VBAT_TXON].flag == true) {
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ ret = -EBUSY;
+ dev_err(gpadc->dev, "gpadc: Auto vbat txon busy");
+ goto out;
+ }
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+
+ ret = ab5500_gpadc_vbat_auto_conf(gpadc, in);
+ if (ret < 0)
+ goto out;
+
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXON].auto_mux = in->mux;
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXON].auto_callb =
+ in->auto_adc_callback;
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXON].flag = true;
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ } else if (in->mux == MAIN_BAT_V_TRIG_MIN) {
+
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ if (gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].flag == true) {
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ ret = -EBUSY;
+ dev_err(gpadc->dev, "gpadc: Auto vbat busy");
+ goto out;
+ }
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+
+ ret = ab5500_gpadc_vbat_auto_conf(gpadc, in);
+ if (ret < 0)
+ goto out;
+
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].auto_mux = in->mux;
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].auto_callb =
+ in->auto_adc_callback;
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].flag = true;
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ } else {
+ /*
+ * check if free trigger is available
+ */
+ trig = ADC_INPUT_TRIG0;
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ while (gpadc->adc_trig[trig].flag == true &&
+ trig <= ADC_INPUT_TRIG7)
+ trig++;
+
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ if (trig > ADC_INPUT_TRIG7) {
+ ret = -EBUSY;
+ dev_err(gpadc->dev, "gpadc: no free channel\n");
+ goto out;
+ }
+ switch (in->mux) {
+ case MAIN_BAT_V:
+ /*
+ * The mux scale voltage depends on the type of battery:
+ * for Li-ion use MUX_SCALE_35 => 2.3-3.5V
+ * for LiFePO4 use MUX_SCALE_45 => 2.3-4.5V
+ * Check type of battery from platform data TODO ???
+ */
+ ret = abx500_mask_and_set_register_interruptible(
+ gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL,
+ MUX_SCALE_VBAT_MASK, MUX_SCALE_45);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: failed to read status\n");
+ goto out;
+ }
+
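+ /* fall through - the common trigger setup below applies here too */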
+ case BTEMP_BALL:
+ ret = abx500_set_register_interruptible(
+ gpadc->dev, AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_ADOUT0_CTRL,
+ ADOUT0_TRIGX_PRESTART);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: failed to set prestart\n");
+ goto out;
+ }
+
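+ /* fall through */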
+ case ACC_DETECT2:
+ case ACC_DETECT3:
+ case VBUS_V:
+ case USB_CHARGER_C:
+ case BK_BAT_V:
+ case PCB_TEMP:
+ case USB_ID:
+ case BAT_CTRL:
+ gpadc->adc_trig[trig].trig_min =
+ (u8)TRIG_V(in->min, adc_tab[in->mux].min,
+ adc_tab[in->mux].max);
+ gpadc->adc_trig[trig].trig_max =
+ (u8)TRIG_V(in->max, adc_tab[in->mux].min,
+ adc_tab[in->mux].max);
+ gpadc->adc_trig[trig].adout =
+ adc_tab[in->mux].adout;
+ break;
+ case DIE_TEMP:
+ /*
+ * From the AB5500 product specification
+ * T(deg_cel) = 27 - (ADCode - 709)/2.4213)
+ * ADCode = 709 + (2.4213 * (27 - T))
+ * Auto trigger min/max level is of 8bit precision.
+ * Hence use AB5500_GPADC_MANDATAH_REG value
+ * obtained by 2 bit right shift of ADCode.
+ */
+ gpadc->adc_trig[trig].trig_min =
+ (709 + ((24213 * (27 - in->min))/10000))>>2;
+ gpadc->adc_trig[trig].trig_max =
+ (709 + ((24213 * (27 - in->max))/10000))>>2;
+ gpadc->adc_trig[trig].adout =
+ adc_tab[in->mux].adout;
+ break;
+ default:
+ dev_err(gpadc->dev, "Unknow GPADC request\n");
+ break;
+ }
+ gpadc->adc_trig[trig].freq = in->freq;
+ gpadc->adc_trig[trig].auto_mux =
+ adc_tab[in->mux].mux;
+ gpadc->adc_trig[trig].auto_callb = in->auto_adc_callback;
+
+ ret = ab5500_gpadc_program_auto(gpadc, trig);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc: fail to program auto ch\n");
+ goto out;
+ }
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC,
+ AB5500_GPADC_AUTO_TRIG_INDEX + (trig * 4),
+ TRIGX_ARM_MASK, TRIGX_ARM);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: fail to trigger\n");
+ goto out;
+ }
+ spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags);
+ gpadc->adc_trig[trig].flag = true;
+ spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags);
+ }
+out:
+ mutex_unlock(&gpadc->ab5500_gpadc_lock);
+ return ret;
+
+}
+EXPORT_SYMBOL(ab5500_gpadc_convert_auto);
+
+/* sysfs interface for GPADC0 */
+static ssize_t ab5500_gpadc0_get(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int voltage;
+ struct ab5500_gpadc *gpadc = dev_get_drvdata(dev);
+
+ voltage = ab5500_gpadc_convert(gpadc, GPADC0_V);
+
+ return sprintf(buf, "%d\n", voltage);
+}
+static DEVICE_ATTR(adc0volt, 0444, ab5500_gpadc0_get, NULL);
+
+static void ab5500_gpadc_trigx_work(struct ab5500_gpadc *gp, int trig)
+{
+ unsigned long flags;
+ if (gp->adc_trig[trig].auto_callb != NULL) {
+ gp->adc_trig[trig].auto_callb(gp->adc_trig[trig].auto_mux);
+ spin_lock_irqsave(&gp->gpadc_auto_lock, flags);
+ gp->adc_trig[trig].flag = false;
+ spin_unlock_irqrestore(&gp->gpadc_auto_lock, flags);
+ } else {
+ dev_err(gp->dev, "Unknown trig for %d\n", trig);
+ }
+}
+/**
+ * ab5500_gpadc_trig0_work() - work item for trig0 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 0 auto conversion.
+ */
+static void ab5500_gpadc_trig0_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig0_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG0);
+}
+
+/**
+ * ab5500_gpadc_trig1_work() - work item for trig1 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig1 auto conversion.
+ */
+static void ab5500_gpadc_trig1_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig1_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG1);
+}
+
+/**
+ * ab5500_gpadc_trig2_work() - work item for trig2 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 2 auto conversion.
+ */
+static void ab5500_gpadc_trig2_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig2_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG2);
+}
+
+/**
+ * ab5500_gpadc_trig3_work() - work item for trig3 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 3 auto conversion.
+ */
+static void ab5500_gpadc_trig3_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig3_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG3);
+}
+
+/**
+ * ab5500_gpadc_trig4_work() - work item for trig4 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 4 auto conversion.
+ */
+static void ab5500_gpadc_trig4_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig4_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG4);
+}
+
+/**
+ * ab5500_gpadc_trig5_work() - work item for trig5 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 5 auto conversion.
+ */
+static void ab5500_gpadc_trig5_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig5_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG5);
+}
+
+/**
+ * ab5500_gpadc_trig6_work() - work item for trig6 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 6 auto conversion.
+ */
+static void ab5500_gpadc_trig6_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig6_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG6);
+}
+
+/**
+ * ab5500_gpadc_trig7_work() - work item for trig7 auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for trig 7 auto conversion.
+ */
+static void ab5500_gpadc_trig7_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig7_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG7);
+}
+
+/**
+ * ab5500_gpadc_vbat_txon_work() - work item for vbat_txon trigger auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for vbat_txon trigger auto adc.
+ */
+static void ab5500_gpadc_vbat_txon_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig_vbat_txon_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_VBAT_TXON);
+}
+
+/**
+ * ab5500_gpadc_vbat_txoff_work() - work item for vbat_txoff trigger auto adc
+ * @work: work pointer
+ *
+ * This is a work handler for vbat_txoff trigger auto adc.
+ */
+static void ab5500_gpadc_vbat_txoff_work(struct work_struct *work)
+{
+ struct ab5500_gpadc *gpadc = container_of(work,
+ struct ab5500_gpadc, gpadc_trig_vbat_txoff_work);
+ ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_VBAT_TXOFF);
+}
+
+/**
+ * ab5500_adc_trigx_handler() - isr for auto gpadc conversion trigger
+ * @irq: irq number
+ * @_gpadc: pointer to the data passed during request_irq
+ *
+ * This is an interrupt service routine for auto gpadc conversion.
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_adc_trigx_handler(int irq, void *_gpadc)
+{
+ struct ab5500_platform_data *plat;
+ struct ab5500_gpadc *gpadc = _gpadc;
+ int dev_irq;
+
+ plat = dev_get_platdata(gpadc->dev->parent);
+ dev_irq = irq - plat->irq.base;
+
+ switch (dev_irq) {
+ case AB5500_INT_ADC_TRIG0:
+ dev_dbg(gpadc->dev, "Trigger 0 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig0_work);
+ break;
+ case AB5500_INT_ADC_TRIG1:
+ dev_dbg(gpadc->dev, "Trigger 1 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig1_work);
+ break;
+ case AB5500_INT_ADC_TRIG2:
+ dev_dbg(gpadc->dev, "Trigger 2 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig2_work);
+ break;
+ case AB5500_INT_ADC_TRIG3:
+ dev_dbg(gpadc->dev, "Trigger 3 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig3_work);
+ break;
+ case AB5500_INT_ADC_TRIG4:
+ dev_dbg(gpadc->dev, "Trigger 4 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig4_work);
+ break;
+ case AB5500_INT_ADC_TRIG5:
+ dev_dbg(gpadc->dev, "Trigger 5 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig5_work);
+ break;
+ case AB5500_INT_ADC_TRIG6:
+ dev_dbg(gpadc->dev, "Trigger 6 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig6_work);
+ break;
+ case AB5500_INT_ADC_TRIG7:
+ dev_dbg(gpadc->dev, "Trigger 7 received\n");
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig7_work);
+ break;
+ default:
+ dev_dbg(gpadc->dev, "unknown trigx handler input\n");
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_adc_vbat_txon_handler() - isr for auto vbat_txon conversion trigger
+ * @irq: irq number
+ * @_gpadc: pointer to the data passed during request_irq
+ *
+ * This is an interrupt service routine for auto vbat_txon conversion.
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_adc_vbat_txon_handler(int irq, void *_gpadc)
+{
+ struct ab5500_gpadc *gpadc = _gpadc;
+
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig_vbat_txon_work);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_adc_vbat_txoff_handler() - isr for auto vbat_txoff conversion trigger
+ * @irq: irq number
+ * @_gpadc: pointer to the data passed during request_irq
+ *
+ * This is an interrupt service routine for auto vbat_txoff conversion.
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_adc_vbat_txoff_handler(int irq, void *_gpadc)
+{
+ struct ab5500_gpadc *gpadc = _gpadc;
+
+ queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig_vbat_txoff_work);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_gpadc_configuration() - configure the gpadc
+ * @gpadc: pointer to the gpadc device information
+ *
+ * This function enables ADC calibration and full power mode.
+ */
+static int ab5500_gpadc_configuration(struct ab5500_gpadc *gpadc)
+{
+ int ret;
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB5500_BANK_ADC, AB5500_GPADC_AUTO_CTRL2,
+ ADC_CAL_OFF_MASK | ADC_ON_MODE_MASK,
+ ADC_CAL_ON | ADC_FULLPWR);
+ return ret;
+}
+
+/* ab5500 adc driver interrupts and their respective isr */
+static struct ab5500_adc_interrupts ab5500_adc_irq[] = {
+ {"TRIGGER-0", ab5500_adc_trigx_handler},
+ {"TRIGGER-1", ab5500_adc_trigx_handler},
+ {"TRIGGER-2", ab5500_adc_trigx_handler},
+ {"TRIGGER-3", ab5500_adc_trigx_handler},
+ {"TRIGGER-4", ab5500_adc_trigx_handler},
+ {"TRIGGER-5", ab5500_adc_trigx_handler},
+ {"TRIGGER-6", ab5500_adc_trigx_handler},
+ {"TRIGGER-7", ab5500_adc_trigx_handler},
+ {"TRIGGER-VBAT-TXON", ab5500_adc_vbat_txon_handler},
+ {"TRIGGER-VBAT", ab5500_adc_vbat_txoff_handler},
+};
+
+static int __devinit ab5500_gpadc_probe(struct platform_device *pdev)
+{
+ int ret, irq, i, j;
+ struct ab5500_gpadc *gpadc;
+
+ gpadc = kzalloc(sizeof(struct ab5500_gpadc), GFP_KERNEL);
+ if (!gpadc) {
+ dev_err(&pdev->dev, "Error: No memory\n");
+ return -ENOMEM;
+ }
+ gpadc->dev = &pdev->dev;
+ mutex_init(&gpadc->ab5500_gpadc_lock);
+ spin_lock_init(&gpadc->gpadc_auto_lock);
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_adc_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab5500_adc_irq[i].isr,
+ IRQF_NO_SUSPEND,
+ ab5500_adc_irq[i].name, gpadc);
+
+ if (ret) {
+ dev_err(gpadc->dev, "failed to request %s IRQ %d: %d\n"
+ , ab5500_adc_irq[i].name, irq, ret);
+ goto fail_irq;
+ }
+ dev_dbg(gpadc->dev, "Requested %s IRQ %d: %d\n",
+ ab5500_adc_irq[i].name, irq, ret);
+ }
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(gpadc->dev);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "failed to get chip ID\n");
+ goto fail_irq;
+ }
+ gpadc->chip_id = (u8) ret;
+
+ /* Create a work queue for gpadc auto */
+ gpadc->gpadc_wq =
+ create_singlethread_workqueue("ab5500_gpadc_wq");
+ if (gpadc->gpadc_wq == NULL) {
+ dev_err(gpadc->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto fail_irq;
+ }
+
+ INIT_WORK(&gpadc->gpadc_trig0_work, ab5500_gpadc_trig0_work);
+ INIT_WORK(&gpadc->gpadc_trig1_work, ab5500_gpadc_trig1_work);
+ INIT_WORK(&gpadc->gpadc_trig2_work, ab5500_gpadc_trig2_work);
+ INIT_WORK(&gpadc->gpadc_trig3_work, ab5500_gpadc_trig3_work);
+ INIT_WORK(&gpadc->gpadc_trig4_work, ab5500_gpadc_trig4_work);
+ INIT_WORK(&gpadc->gpadc_trig5_work, ab5500_gpadc_trig5_work);
+ INIT_WORK(&gpadc->gpadc_trig6_work, ab5500_gpadc_trig6_work);
+ INIT_WORK(&gpadc->gpadc_trig7_work, ab5500_gpadc_trig7_work);
+ INIT_WORK(&gpadc->gpadc_trig_vbat_txon_work,
+ ab5500_gpadc_vbat_txon_work);
+ INIT_WORK(&gpadc->gpadc_trig_vbat_txoff_work,
+ ab5500_gpadc_vbat_txoff_work);
+
+ for (j = 0; j < N_AUTO_TRIGGER; j++)
+ gpadc->adc_trig[j].flag = false;
+
+ ret = ab5500_gpadc_configuration(gpadc);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc: configuration failed\n");
+ goto free_wq;
+ }
+
+ ret = device_create_file(gpadc->dev, &dev_attr_adc0volt);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "File device creation failed: %d\n", ret);
+ ret = -ENODEV;
+ goto fail_sysfs;
+ }
+ list_add_tail(&gpadc->node, &ab5500_gpadc_list);
+
+ platform_set_drvdata(pdev, gpadc);
+
+ return 0;
+fail_sysfs:
+free_wq:
+ destroy_workqueue(gpadc->gpadc_wq);
+fail_irq:
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name);
+ free_irq(irq, gpadc);
+ }
+ kfree(gpadc);
+ gpadc = NULL;
+ return ret;
+}
+
+static int __devexit ab5500_gpadc_remove(struct platform_device *pdev)
+{
+ int i, irq;
+ struct ab5500_gpadc *gpadc = platform_get_drvdata(pdev);
+
+ device_remove_file(gpadc->dev, &dev_attr_adc0volt);
+
+ /* remove this gpadc entry from the list */
+ list_del(&gpadc->node);
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_adc_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name);
+ free_irq(irq, gpadc);
+ }
+ /* Flush work */
+ flush_workqueue(gpadc->gpadc_wq);
+
+ /* Delete the work queue */
+ destroy_workqueue(gpadc->gpadc_wq);
+
+ kfree(gpadc);
+ gpadc = NULL;
+ return 0;
+}
+
+static struct platform_driver ab5500_gpadc_driver = {
+ .probe = ab5500_gpadc_probe,
+ .remove = __devexit_p(ab5500_gpadc_remove),
+ .driver = {
+ .name = "ab5500-adc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_gpadc_init(void)
+{
+ return platform_driver_register(&ab5500_gpadc_driver);
+}
+
+static void __exit ab5500_gpadc_exit(void)
+{
+ platform_driver_unregister(&ab5500_gpadc_driver);
+}
+
+subsys_initcall_sync(ab5500_gpadc_init);
+module_exit(ab5500_gpadc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vijaya Kumar K");
+MODULE_ALIAS("platform:ab5500_adc");
+MODULE_DESCRIPTION("AB5500 GPADC driver");
diff --git a/drivers/mfd/ab5500-power.c b/drivers/mfd/ab5500-power.c
new file mode 100644
index 00000000000..9474c32809b
--- /dev/null
+++ b/drivers/mfd/ab5500-power.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+static struct device *dev;
+
+/* STARTUP */
+#define AB5500_SYSPOR_CONTROL 0x30
+
+/* VINT IO I2C CLOCK */
+#define AB5500_RTC_VINT 0x01
+
+int ab5500_clock_rtc_enable(int num, bool enable)
+{
+ /* RTC_CLK{0,1,2} are bits {4,3,2}, active low */
+ u8 mask = BIT(4 - num);
+ u8 value = enable ? 0 : mask;
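+ /* e.g. num = 1 -> mask = BIT(3); enabling writes 0 as the bits are active low */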
+
+ /* Don't allow RTC_CLK0 to be controlled. */
+ if (num < 1 || num > 2)
+ return -EINVAL;
+
+ if (!dev)
+ return -EAGAIN;
+
+ return abx500_mask_and_set(dev, AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP,
+ AB5500_RTC_VINT, mask, value);
+}
+
+static void ab5500_power_off(void)
+{
+ sigset_t old;
+ sigset_t all;
+
+ sigfillset(&all);
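+ /*
+ * Block all signals so that the interruptible register access
+ * below is not aborted by a pending signal.
+ */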
+
+ if (!sigprocmask(SIG_BLOCK, &all, &old)) {
+ /* Clear dbb_on */
+ int ret = abx500_set(dev, AB5500_BANK_STARTUP,
+ AB5500_SYSPOR_CONTROL, 0);
+ WARN_ON(ret);
+ }
+}
+
+static int __devinit ab5500_power_probe(struct platform_device *pdev)
+{
+ struct ab5500_platform_data *plat = dev_get_platdata(pdev->dev.parent);
+
+ dev = &pdev->dev;
+
+ if (plat->pm_power_off)
+ pm_power_off = ab5500_power_off;
+
+ return 0;
+}
+
+static int __devexit ab5500_power_remove(struct platform_device *pdev)
+{
+ struct ab5500_platform_data *plat = dev_get_platdata(pdev->dev.parent);
+
+ if (plat->pm_power_off)
+ pm_power_off = NULL;
+ dev = NULL;
+
+ return 0;
+}
+
+static struct platform_driver ab5500_power_driver = {
+ .driver = {
+ .name = "ab5500-power",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_power_probe,
+ .remove = __devexit_p(ab5500_power_remove),
+};
+
+static int __init ab5500_power_init(void)
+{
+ return platform_driver_register(&ab5500_power_driver);
+}
+
+subsys_initcall(ab5500_power_init);
+
+MODULE_DESCRIPTION("AB5500 power driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1f08704f7ae..8137ac22816 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -100,6 +100,9 @@
#define AB9540_MODEM_CTRL2_REG 0x23
#define AB9540_MODEM_CTRL2_SWDBBRSTN_BIT BIT(2)
+static bool no_bm; /* No battery management */
+module_param(no_bm, bool, S_IRUGO);
+
/*
* Map interrupt numbers to the LATCH and MASK register offsets, Interrupt
* numbers are indexed into this array with (num / 8). The interrupts are
@@ -257,6 +260,7 @@ static struct abx500_ops ab8500_ops = {
.mask_and_set_register = ab8500_mask_and_set_register,
.event_registers_startup_state_get = NULL,
.startup_irq_enabled = NULL,
+ .dump_all_banks = ab8500_dump_all_banks,
};
static void ab8500_irq_lock(struct irq_data *data)
@@ -354,6 +358,7 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
int line = i * 8 + bit;
handle_nested_irq(ab8500->irq_base + line);
+ ab8500_debug_register_interrupt(line);
value &= ~(1 << bit);
} while (value);
}
@@ -746,7 +751,7 @@ static struct resource __devinitdata ab8500_usb_resources[] = {
static struct resource __devinitdata ab8500_temp_resources[] = {
{
- .name = "AB8500_TEMP_WARM",
+ .name = "ABX500_TEMP_WARM",
.start = AB8500_INT_TEMP_WARM,
.end = AB8500_INT_TEMP_WARM,
.flags = IORESOURCE_IRQ,
@@ -768,6 +773,9 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
.name = "ab8500-regulator",
},
{
+ .name = "ab8500-regulator-debug",
+ },
+ {
.name = "ab8500-gpadc",
.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
.resources = ab8500_gpadc_resources,
@@ -778,26 +786,6 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
.resources = ab8500_rtc_resources,
},
{
- .name = "ab8500-charger",
- .num_resources = ARRAY_SIZE(ab8500_charger_resources),
- .resources = ab8500_charger_resources,
- },
- {
- .name = "ab8500-btemp",
- .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
- .resources = ab8500_btemp_resources,
- },
- {
- .name = "ab8500-fg",
- .num_resources = ARRAY_SIZE(ab8500_fg_resources),
- .resources = ab8500_fg_resources,
- },
- {
- .name = "ab8500-chargalg",
- .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
- .resources = ab8500_chargalg_resources,
- },
- {
.name = "ab8500-acc-det",
.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
.resources = ab8500_av_acc_detect_resources,
@@ -815,20 +803,12 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
.name = "ab8500-pwm",
.id = 1,
},
- {
- .name = "ab8500-pwm",
- .id = 2,
- },
- {
- .name = "ab8500-pwm",
- .id = 3,
- },
{ .name = "ab8500-leds", },
{
.name = "ab8500-denc",
},
{
- .name = "ab8500-temp",
+ .name = "abx500-temp",
.num_resources = ARRAY_SIZE(ab8500_temp_resources),
.resources = ab8500_temp_resources,
},
@@ -860,6 +840,29 @@ static struct mfd_cell __devinitdata ab9540_devs[] = {
},
};
+static struct mfd_cell __devinitdata ab8500_bm_devs[] = {
+ {
+ .name = "ab8500-charger",
+ .num_resources = ARRAY_SIZE(ab8500_charger_resources),
+ .resources = ab8500_charger_resources,
+ },
+ {
+ .name = "ab8500-btemp",
+ .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
+ .resources = ab8500_btemp_resources,
+ },
+ {
+ .name = "ab8500-fg",
+ .num_resources = ARRAY_SIZE(ab8500_fg_resources),
+ .resources = ab8500_fg_resources,
+ },
+ {
+ .name = "ab8500-chargalg",
+ .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
+ .resources = ab8500_chargalg_resources,
+ },
+};
+
static ssize_t show_chip_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1130,6 +1133,15 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
if (ret)
goto out_freeirq;
+ if (!no_bm) {
+ /* Add battery management devices */
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
+ ARRAY_SIZE(ab8500_bm_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ dev_err(ab8500->dev, "error adding bm devices\n");
+ }
+
if (is_ab9540(ab8500))
ret = sysfs_create_group(&ab8500->dev->kobj,
&ab9540_attr_group);
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 9a0211aa889..7b912afd664 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -4,6 +4,72 @@
* Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
* License Terms: GNU General Public License v2
*/
+/*
+ * AB8500 register access
+ * ======================
+ *
+ * read:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # echo ADDR > <debugfs>/ab8500/register-address
+ * # cat <debugfs>/ab8500/register-value
+ *
+ * write:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # echo ADDR > <debugfs>/ab8500/register-address
+ * # echo VALUE > <debugfs>/ab8500/register-value
+ *
+ * read all registers from a bank:
+ * # echo BANK > <debugfs>/ab8500/register-bank
+ * # cat <debugfs>/ab8500/all-bank-register
+ *
+ * BANK target AB8500 register bank
+ * ADDR target AB8500 register address
+ * VALUE decimal or 0x-prefixed hexadecimal
+ *
+ *
+ * User Space notification on AB8500 IRQ
+ * =====================================
+ *
+ * Allows a user space entity to be notified when a target AB8500 IRQ occurs.
+ * When subscribed, a sysfs entry is created in the ab8500.i2c platform device.
+ * One can poll this file to get target IRQ occurrence information.
+ *
+ * subscribe to an AB8500 IRQ:
+ * # echo IRQ > <debugfs>/ab8500/irq-subscribe
+ *
+ * unsubscribe from an AB8500 IRQ:
+ * # echo IRQ > <debugfs>/ab8500/irq-unsubscribe
+ *
+ *
+ * AB8500 register formatted read/write access
+ * ==========================================
+ *
+ * Read: read data, data>>SHIFT, data&=MASK, output data
+ * [0xABCDEF98] shift=12 mask=0xFFF => 0x00000CDE
+ * Write: read data, data &= ~(MASK<<SHIFT), data |= (VALUE<<SHIFT), write data
+ * [0xABCDEF98] shift=12 mask=0xFFF value=0x123 => [0xAB123F98]
+ *
+ * Usage:
+ * # echo "CMD [OPTIONS] BANK ADRESS [VALUE]" > $debugfs/ab8500/hwreg
+ *
+ * CMD read read access
+ * write write access
+ *
+ * BANK target reg bank
+ * ADDRESS target reg address
+ * VALUE (write) value to be updated
+ *
+ * OPTIONS
+ * -d|-dec (read) output in decimal
+ * -h|-hexa (read) output in 0x-hexa (default)
+ * -l|-w|-b 32bit (default), 16bit or 8bit reg access
+ * -m|-mask MASK 0x-hexa mask (default 0xFFFFFFFF)
+ * -s|-shift SHIFT bit shift value (read: right, write: left)
+ * -o|-offset OFFSET address offset to add to ADDRESS value
+ *
+ * Warning: the bit shift operation is also applied to the bit mask.
+ * Warning: the bit shift direction depends on the read or write command.
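+ *
+ * Example, reading a register as hex (bank and address values are
+ * illustrative):
+ * # echo "read -hexa 0x02 0x40" > <debugfs>/ab8500/hwreg
+ * # cat <debugfs>/ab8500/hwreg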
+ */
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -11,13 +77,29 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/string.h>
+#include <linux/ctype.h>
+#endif
static u32 debug_bank;
static u32 debug_address;
+static int irq_first;
+static int irq_last;
+static u32 *irq_count;
+static int num_irqs;
+
+static struct device_attribute **dev_attr;
+static char **event_name;
+
/**
* struct ab8500_reg_range
* @first: the first address of the range
@@ -42,15 +124,35 @@ struct ab8500_i2c_ranges {
const struct ab8500_reg_range *range;
};
+/* hwreg "mask" and "shift" entry resources */
+struct hwreg_cfg {
+ u32 bank; /* target bank */
+ u32 addr; /* target address */
+ uint fmt; /* format */
+ uint mask; /* read/write mask, applied before any bit shift */
+ int shift; /* bit shift (read: right shift, write: left shift) */
+};
+/* fmt bit #0: 0=hexa, 1=dec */
+#define REG_FMT_DEC(c) ((c)->fmt & 0x1)
+#define REG_FMT_HEX(c) (!REG_FMT_DEC(c))
+
+static struct hwreg_cfg hwreg_cfg = {
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+};
+
#define AB8500_NAME_STRING "ab8500"
-#define AB8500_NUM_BANKS 22
+#define AB8500_ADC_NAME_STRING "gpadc"
+#define AB8500_NUM_BANKS 24
#define AB8500_REV_REG 0x80
static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
[0x0] = {
.num_ranges = 0,
- .range = 0,
+ .range = NULL,
},
[AB8500_SYS_CTRL1_BLOCK] = {
.num_ranges = 3,
@@ -215,7 +317,7 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
},
[AB8500_CHARGER] = {
- .num_ranges = 8,
+ .num_ranges = 9,
.range = (struct ab8500_reg_range[]) {
{
.first = 0x00,
@@ -249,6 +351,10 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
.first = 0xC0,
.last = 0xC2,
},
+ {
+ .first = 0xf5,
+ .last = 0xf6,
+ },
},
},
[AB8500_GAS_GAUGE] = {
@@ -268,6 +374,24 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
},
},
+ [AB8500_DEVELOPMENT] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x00,
+ },
+ },
+ },
+ [AB8500_DEBUG] = {
+ .num_ranges = 1,
+ .range = (struct ab8500_reg_range[]) {
+ {
+ .first = 0x05,
+ .last = 0x07,
+ },
+ },
+ },
[AB8500_AUDIO] = {
.num_ranges = 1,
.range = (struct ab8500_reg_range[]) {
@@ -354,15 +478,30 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
},
};
-static int ab8500_registers_print(struct seq_file *s, void *p)
+static irqreturn_t ab8500_debug_handler(int irq, void *data)
{
- struct device *dev = s->private;
- unsigned int i;
- u32 bank = debug_bank;
+ char buf[16];
+ struct kobject *kobj = (struct kobject *)data;
+ unsigned int irq_abb = irq - irq_first;
- seq_printf(s, AB8500_NAME_STRING " register values:\n");
+ if (irq_abb < num_irqs)
+ irq_count[irq_abb]++;
+ /*
+ * This makes it possible to use poll for events (POLLPRI | POLLERR)
+ * from userspace on sysfs file named <irq-nr>
+ */
+ sprintf(buf, "%d", irq);
+ sysfs_notify(kobj, NULL, buf);
+
+ return IRQ_HANDLED;
+}
+
+/* Prints to seq_file or log_buf */
+static int ab8500_registers_print(struct device *dev, u32 bank,
+ struct seq_file *s)
+{
+ unsigned int i;
- seq_printf(s, " bank %u:\n", bank);
for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
u32 reg;
@@ -379,22 +518,42 @@ static int ab8500_registers_print(struct seq_file *s, void *p)
return err;
}
- err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n", bank,
- reg, value);
- if (err < 0) {
- dev_err(dev, "seq_printf overflow\n");
- /* Error is not returned here since
- * the output is wanted in any case */
- return 0;
+ if (s) {
+ err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n",
+ bank, reg, value);
+ if (err < 0) {
+ dev_err(dev,
+ "seq_printf overflow bank=%d reg=%d\n",
+ bank, reg);
+ /* Error is not returned here since
+ * the output is wanted in any case */
+ return 0;
+ }
+ } else {
+ printk(KERN_INFO" [%u/0x%02X]: 0x%02X\n", bank,
+ reg, value);
}
}
}
return 0;
}
+static int ab8500_print_bank_registers(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ u32 bank = debug_bank;
+
+ seq_printf(s, AB8500_NAME_STRING " register values:\n");
+
+ seq_printf(s, " bank %u:\n", bank);
+
+ ab8500_registers_print(dev, bank, s);
+ return 0;
+}
+
static int ab8500_registers_open(struct inode *inode, struct file *file)
{
- return single_open(file, ab8500_registers_print, inode->i_private);
+ return single_open(file, ab8500_print_bank_registers, inode->i_private);
}
static const struct file_operations ab8500_registers_fops = {
@@ -405,6 +564,64 @@ static const struct file_operations ab8500_registers_fops = {
.owner = THIS_MODULE,
};
+static int ab8500_print_all_banks(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ unsigned int i;
+ int err;
+
+ seq_printf(s, AB8500_NAME_STRING " register values:\n");
+
+ for (i = 1; i < AB8500_NUM_BANKS; i++) {
+ err = seq_printf(s, " bank %u:\n", i);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow, bank=%d\n", i);
+
+ ab8500_registers_print(dev, i, s);
+ }
+ return 0;
+}
+
+/* Dump registers to kernel log */
+void ab8500_dump_all_banks(struct device *dev)
+{
+ unsigned int i;
+
+ printk(KERN_INFO"ab8500 register values:\n");
+
+ for (i = 1; i < AB8500_NUM_BANKS; i++) {
+ printk(KERN_INFO" bank %u:\n", i);
+ ab8500_registers_print(dev, i, NULL);
+ }
+}
+
+static int ab8500_all_banks_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *s;
+ int err;
+
+ err = single_open(file, ab8500_print_all_banks, inode->i_private);
+ if (!err) {
+ /* Default buf size in seq_read is not enough */
+ s = (struct seq_file *)file->private_data;
+ s->size = (PAGE_SIZE * 2);
+ s->buf = kmalloc(s->size, GFP_KERNEL);
+ if (!s->buf) {
+ single_release(inode, file);
+ err = -ENOMEM;
+ }
+ }
+ return err;
+}
+
+static const struct file_operations ab8500_all_banks_fops = {
+ .open = ab8500_all_banks_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static int ab8500_bank_print(struct seq_file *s, void *p)
{
return seq_printf(s, "%d\n", debug_bank);
@@ -515,10 +732,761 @@ static ssize_t ab8500_val_write(struct file *file,
printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
return -EINVAL;
}
+ return count;
+}
+
+/*
+ * Interrupt status
+ */
+static u32 num_interrupts[AB8500_MAX_NR_IRQS];
+static int num_interrupt_lines;
+
+void ab8500_debug_register_interrupt(int line)
+{
+ if (line < num_interrupt_lines)
+ num_interrupts[line]++;
+}
+
+static int ab8500_interrupts_print(struct seq_file *s, void *p)
+{
+ int line;
+
+ seq_printf(s, "irq: number of\n");
+
+ for (line = 0; line < num_interrupt_lines; line++)
+ seq_printf(s, "%3i: %6i\n", line, num_interrupts[line]);
+
+ return 0;
+}
+
+static int ab8500_interrupts_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_interrupts_print, inode->i_private);
+}
+
+/*
+ * - HWREG DB8500-formatted routines
+ */
+static int ab8500_hwreg_print(struct seq_file *s, void *d)
+{
+ struct device *dev = s->private;
+ int ret;
+ u8 regvalue;
+
+ ret = abx500_get_register_interruptible(dev,
+ (u8)hwreg_cfg.bank, (u8)hwreg_cfg.addr, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
+ if (hwreg_cfg.shift >= 0)
+ regvalue >>= hwreg_cfg.shift;
+ else
+ regvalue <<= -hwreg_cfg.shift;
+ regvalue &= hwreg_cfg.mask;
+
+ if (REG_FMT_DEC(&hwreg_cfg))
+ seq_printf(s, "%d\n", regvalue);
+ else
+ seq_printf(s, "0x%02X\n", regvalue);
+ return 0;
+}
+
+static int ab8500_hwreg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_hwreg_print, inode->i_private);
+}
+
+static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
+{
+ int bat_ctrl_raw;
+ int bat_ctrl_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ bat_ctrl_raw = ab8500_gpadc_read_raw(gpadc, BAT_CTRL);
+ bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ BAT_CTRL, bat_ctrl_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ bat_ctrl_convert, bat_ctrl_raw);
+}
+
+static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_bat_ctrl_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
+ .open = ab8500_gpadc_bat_ctrl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
+{
+ int btemp_ball_raw;
+ int btemp_ball_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ btemp_ball_raw = ab8500_gpadc_read_raw(gpadc, BTEMP_BALL);
+ btemp_ball_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
+ btemp_ball_raw);
+
+ return seq_printf(s,
+ "%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
+}
+
+static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_btemp_ball_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
+ .open = ab8500_gpadc_btemp_ball_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
+{
+ int main_charger_v_raw;
+ int main_charger_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ main_charger_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_V);
+ main_charger_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ MAIN_CHARGER_V, main_charger_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_charger_v_convert, main_charger_v_raw);
+}
+
+static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_charger_v_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
+ .open = ab8500_gpadc_main_charger_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
+{
+ int acc_detect1_raw;
+ int acc_detect1_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ acc_detect1_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT1);
+ acc_detect1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ACC_DETECT1,
+ acc_detect1_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ acc_detect1_convert, acc_detect1_raw);
+}
+
+static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_acc_detect1_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
+ .open = ab8500_gpadc_acc_detect1_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
+{
+ int acc_detect2_raw;
+ int acc_detect2_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ acc_detect2_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT2);
+ acc_detect2_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ ACC_DETECT2, acc_detect2_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ acc_detect2_convert, acc_detect2_raw);
+}
+
+static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_acc_detect2_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
+ .open = ab8500_gpadc_acc_detect2_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
+{
+ int aux1_raw;
+ int aux1_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ aux1_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX1);
+ aux1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX1,
+ aux1_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ aux1_convert, aux1_raw);
+}
+
+static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_aux1_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_aux1_fops = {
+ .open = ab8500_gpadc_aux1_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
+{
+ int aux2_raw;
+ int aux2_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ aux2_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX2);
+ aux2_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX2,
+ aux2_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ aux2_convert, aux2_raw);
+}
+
+static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_aux2_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_aux2_fops = {
+ .open = ab8500_gpadc_aux2_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
+{
+ int main_bat_v_raw;
+ int main_bat_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ main_bat_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V);
+ main_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
+ main_bat_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_bat_v_convert, main_bat_v_raw);
+}
+
+static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_bat_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
+ .open = ab8500_gpadc_main_bat_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
+{
+ int vbus_v_raw;
+ int vbus_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ vbus_v_raw = ab8500_gpadc_read_raw(gpadc, VBUS_V);
+ vbus_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBUS_V,
+ vbus_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ vbus_v_convert, vbus_v_raw);
+}
+
+static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_vbus_v_fops = {
+ .open = ab8500_gpadc_vbus_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
+{
+ int main_charger_c_raw;
+ int main_charger_c_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ main_charger_c_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_C);
+ main_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ MAIN_CHARGER_C, main_charger_c_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ main_charger_c_convert, main_charger_c_raw);
+}
+
+static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_main_charger_c_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
+ .open = ab8500_gpadc_main_charger_c_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
+{
+ int usb_charger_c_raw;
+ int usb_charger_c_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ usb_charger_c_raw = ab8500_gpadc_read_raw(gpadc, USB_CHARGER_C);
+ usb_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ USB_CHARGER_C, usb_charger_c_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ usb_charger_c_convert, usb_charger_c_raw);
+}
+
+static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_gpadc_usb_charger_c_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
+ .open = ab8500_gpadc_usb_charger_c_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
+{
+ int bk_bat_v_raw;
+ int bk_bat_v_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ bk_bat_v_raw = ab8500_gpadc_read_raw(gpadc, BK_BAT_V);
+ bk_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+ BK_BAT_V, bk_bat_v_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ bk_bat_v_convert, bk_bat_v_raw);
+}
+
+static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_bk_bat_v_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
+ .open = ab8500_gpadc_bk_bat_v_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
+{
+ int die_temp_raw;
+ int die_temp_convert;
+ struct ab8500_gpadc *gpadc;
+
+ gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ die_temp_raw = ab8500_gpadc_read_raw(gpadc, DIE_TEMP);
+ die_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, DIE_TEMP,
+ die_temp_raw);
+
+ return seq_printf(s, "%d,0x%X\n",
+ die_temp_convert, die_temp_raw);
+}
+
+static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_gpadc_die_temp_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_die_temp_fops = {
+ .open = ab8500_gpadc_die_temp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Return the length of an ASCII numerical value, or 0 if the string is
+ * not a numerical value.
+ * The string must start at the value's first character and may only be
+ * terminated by '\0', space or newline characters.
+ * The value can be decimal or hexadecimal (prefixed with 0x or 0X).
+ */
+static int strval_len(char *b)
+{
+ char *s = b;
+ if ((*s == '0') && ((*(s+1) == 'x') || (*(s+1) == 'X'))) {
+ s += 2;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isxdigit(*s))
+ return 0;
+ }
+ } else {
+ if (*s == '-')
+ s++;
+ for (; *s && (*s != ' ') && (*s != '\n'); s++) {
+ if (!isdigit(*s))
+ return 0;
+ }
+ }
+ return (int) (s-b);
+}
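+
+/*
+ * strval_len() examples (illustrative):
+ *	strval_len("0x1F ") == 4
+ *	strval_len("-12\n") == 3
+ *	strval_len("0xZ2") == 0 (not a hex digit)
+ *	strval_len("12a") == 0 (not a decimal digit)
+ */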
+
+/*
+ * Parse the hwreg input data.
+ * The global hwreg_cfg is only updated if the input syntax is OK.
+ */
+static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg,
+ struct device *dev)
+{
+ uint write, val = 0;
+ struct hwreg_cfg loc = {
+ .bank = 0, /* default: invalid phys addr */
+ .addr = 0, /* default: invalid phys addr */
+ .fmt = 0, /* default: 32bit access, hex output */
+ .mask = 0xFFFFFFFF, /* default: no mask */
+ .shift = 0, /* default: no bit shift */
+ };
+
+ /* read or write ? */
+ if (!strncmp(b, "read ", 5)) {
+ write = 0;
+ b += 5;
+ } else if (!strncmp(b, "write ", 6)) {
+ write = 1;
+ b += 6;
+ } else
+ return -EINVAL;
+
+ /* OPTIONS: -d|-dec, -h|-hex, -m|-mask, -s|-shift */
+ while ((*b == ' ') || (*b == '-')) {
+ if (*(b-1) != ' ') {
+ b++;
+ continue;
+ }
+ if ((!strncmp(b, "-d ", 3)) ||
+ (!strncmp(b, "-dec ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt |= (1<<0);
+ } else if ((!strncmp(b, "-h ", 3)) ||
+ (!strncmp(b, "-hex ", 5))) {
+ b += (*(b+2) == ' ') ? 3 : 5;
+ loc.fmt &= ~(1<<0);
+ } else if ((!strncmp(b, "-m ", 3)) ||
+ (!strncmp(b, "-mask ", 6))) {
+ b += (*(b+2) == ' ') ? 3 : 6;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.mask = simple_strtoul(b, &b, 0);
+ } else if ((!strncmp(b, "-s ", 3)) ||
+ (!strncmp(b, "-shift ", 7))) {
+ b += (*(b+2) == ' ') ? 3 : 7;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.shift = simple_strtol(b, &b, 0);
+ } else {
+ return -EINVAL;
+ }
+ }
+ /* get arg BANK and ADDRESS */
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.bank = simple_strtoul(b, &b, 0);
+ while (*b == ' ')
+ b++;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ loc.addr = simple_strtoul(b, &b, 0);
+
+ if (write) {
+ while (*b == ' ')
+ b++;
+ if (strval_len(b) == 0)
+ return -EINVAL;
+ val = simple_strtoul(b, &b, 0);
+ }
+
+ /* args are ok, update target cfg (mainly for read) */
+ *cfg = loc;
+
+#ifdef ABB_HWREG_DEBUG
+ pr_warn("HWREG request: %s, %s, addr=0x%08X, mask=0x%X, shift=%d"
+ "value=0x%X\n", (write) ? "write" : "read",
+ REG_FMT_DEC(cfg) ? "decimal" : "hexa",
+ cfg->addr, cfg->mask, cfg->shift, val);
+#endif
+
+ if (write) {
+ u8 regvalue;
+ int ret = abx500_get_register_interruptible(dev,
+ (u8)cfg->bank, (u8)cfg->addr, &regvalue);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
+ if (cfg->shift >= 0) {
+ regvalue &= ~(cfg->mask << (cfg->shift));
+ val = (val & cfg->mask) << (cfg->shift);
+ } else {
+ regvalue &= ~(cfg->mask >> (-cfg->shift));
+ val = (val & cfg->mask) >> (-cfg->shift);
+ }
+ val = val | regvalue;
+
+ ret = abx500_set_register_interruptible(dev,
+ (u8)cfg->bank, (u8)cfg->addr, (u8)val);
+ if (ret < 0) {
+ pr_err("abx500_set_reg failed %d, %d", ret, __LINE__);
+ return -EINVAL;
+ }
+
+ }
+ return 0;
+}
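+
+/*
+ * Accepted input examples (illustrative):
+ *
+ *	read 0x02 0x40
+ *	read -dec -mask 0x0F -shift 4 2 64
+ *	write 0x02 0x40 0x55
+ *
+ * Bank, address and value may be decimal or 0x-prefixed hex; on a
+ * write, a mask/shift pair turns the operation into a
+ * read-modify-write of the selected bit field.
+ */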
+
+static ssize_t ab8500_hwreg_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[128];
+ int buf_size, ret;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* get args and process */
+ ret = hwreg_common_write(buf, &hwreg_cfg, dev);
+ return (ret) ? ret : buf_size;
+}
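+
+/*
+ * Usage sketch from userspace (assuming the default debugfs mount
+ * point and an "ab8500" directory name):
+ *
+ *	echo "write 0x02 0x40 0x55" > /sys/kernel/debug/ab8500/hwreg
+ *	echo "read 0x02 0x40" > /sys/kernel/debug/ab8500/hwreg
+ *	cat /sys/kernel/debug/ab8500/hwreg
+ *
+ * A "read" command only latches bank/address/format in hwreg_cfg; the
+ * register is actually accessed when the node is read back.
+ */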
+
+/*
+ * - irq subscribe/unsubscribe stuff
+ */
+static int ab8500_subscribe_unsubscribe_print(struct seq_file *s, void *p)
+{
+ seq_printf(s, "%d\n", irq_first);
+
+ return 0;
+}
+
+static int ab8500_subscribe_unsubscribe_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_subscribe_unsubscribe_print,
+ inode->i_private);
+}
+
+/*
+ * Userspace should use poll() on this file. When an event occurs,
+ * the blocking poll will be released.
+ */
+static ssize_t show_irq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long name;
+ unsigned int irq_index;
+ int err;
+
+ err = strict_strtoul(attr->attr.name, 0, &name);
+ if (err)
+ return err;
+
+ irq_index = name - irq_first;
+ if (irq_index >= num_irqs)
+ return -EINVAL;
+ else
+ return sprintf(buf, "%u\n", irq_count[irq_index]);
+}
+
+static ssize_t ab8500_subscribe_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ unsigned int irq_index;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val < irq_first) {
+ dev_err(dev, "debugfs error input < %d\n", irq_first);
+ return -EINVAL;
+ }
+ if (user_val > irq_last) {
+ dev_err(dev, "debugfs error input > %d\n", irq_last);
+ return -EINVAL;
+ }
+
+ irq_index = user_val - irq_first;
+ if (irq_index >= num_irqs)
+ return -EINVAL;
+
+ /*
+ * This will create a sysfs file named <irq-nr> which userspace can
+ * use to select or poll on, in order to receive the AB8500 events
+ */
+ dev_attr[irq_index] = kmalloc(sizeof(struct device_attribute),
+ GFP_KERNEL);
+ event_name[irq_index] = kmalloc(buf_size + 1, GFP_KERNEL); /* +1 for NUL */
+ if (!dev_attr[irq_index] || !event_name[irq_index]) {
+ kfree(dev_attr[irq_index]);
+ kfree(event_name[irq_index]);
+ dev_attr[irq_index] = NULL;
+ event_name[irq_index] = NULL;
+ return -ENOMEM;
+ }
+ sprintf(event_name[irq_index], "%lu", user_val);
+ dev_attr[irq_index]->show = show_irq;
+ dev_attr[irq_index]->store = NULL;
+ dev_attr[irq_index]->attr.name = event_name[irq_index];
+ dev_attr[irq_index]->attr.mode = S_IRUGO;
+ err = sysfs_create_file(&dev->kobj, &dev_attr[irq_index]->attr);
+ if (err < 0) {
+ printk(KERN_ERR "sysfs_create_file failed %d\n", err);
+ return err;
+ }
+
+ err = request_threaded_irq(user_val, NULL, ab8500_debug_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND, "ab8500-debug", &dev->kobj);
+ if (err < 0) {
+ printk(KERN_ERR "request_threaded_irq failed %d, %lu\n",
+ err, user_val);
+ sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr);
+ return err;
+ }
+
+ return buf_size;
+}
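+
+/*
+ * Subscription flow sketch (427 is an illustrative IRQ number):
+ *
+ *	echo 427 > /sys/kernel/debug/ab8500/irq-subscribe
+ *	poll() on the sysfs file ".../427" until an event arrives
+ *	echo 427 > /sys/kernel/debug/ab8500/irq-unsubscribe
+ *
+ * The sysfs file named after the IRQ number is created on subscribe
+ * and removed again on unsubscribe.
+ */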
+
+static ssize_t ab8500_unsubscribe_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ unsigned int irq_index;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val < irq_first) {
+ dev_err(dev, "debugfs error input < %d\n", irq_first);
+ return -EINVAL;
+ }
+ if (user_val > irq_last) {
+ dev_err(dev, "debugfs error input > %d\n", irq_last);
+ return -EINVAL;
+ }
+
+ irq_index = user_val - irq_first;
+ if (irq_index >= num_irqs)
+ return -EINVAL;
+
+ /* Reset the irq count when unsubscribing */
+ irq_count[irq_index] = 0;
+
+ if (dev_attr[irq_index])
+ sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr);
+
+ free_irq(user_val, &dev->kobj);
+ kfree(event_name[irq_index]);
+ kfree(dev_attr[irq_index]);
return count;
}
+/*
+ * - fops for the various debugfs nodes
+ */
+
static const struct file_operations ab8500_bank_fops = {
.open = ab8500_bank_open,
.write = ab8500_bank_write,
@@ -546,64 +1514,231 @@ static const struct file_operations ab8500_val_fops = {
.owner = THIS_MODULE,
};
+static const struct file_operations ab8500_interrupts_fops = {
+ .open = ab8500_interrupts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_subscribe_fops = {
+ .open = ab8500_subscribe_unsubscribe_open,
+ .write = ab8500_subscribe_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_unsubscribe_fops = {
+ .open = ab8500_subscribe_unsubscribe_open,
+ .write = ab8500_unsubscribe_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab8500_hwreg_fops = {
+ .open = ab8500_hwreg_open,
+ .write = ab8500_hwreg_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static struct dentry *ab8500_dir;
-static struct dentry *ab8500_reg_file;
-static struct dentry *ab8500_bank_file;
-static struct dentry *ab8500_address_file;
-static struct dentry *ab8500_val_file;
+static struct dentry *ab8500_gpadc_dir;
static int __devinit ab8500_debug_probe(struct platform_device *plf)
{
+ struct dentry *file;
+ int ret = -ENOMEM;
+ struct ab8500 *ab8500;
debug_bank = AB8500_MISC;
debug_address = AB8500_REV_REG & 0x00FF;
+ ab8500 = dev_get_drvdata(plf->dev.parent);
+ num_irqs = ab8500->mask_size;
+
+ irq_count = kzalloc(sizeof(*irq_count) * num_irqs, GFP_KERNEL);
+ if (!irq_count)
+ return -ENOMEM;
+
+ dev_attr = kzalloc(sizeof(*dev_attr) * num_irqs, GFP_KERNEL);
+ if (!dev_attr)
+ goto out_freeirq_count;
+
+ event_name = kzalloc(sizeof(*event_name) * num_irqs, GFP_KERNEL);
+ if (!event_name)
+ goto out_freedev_attr;
+
+ irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
+ if (irq_first < 0) {
+ dev_err(&plf->dev, "First irq not found, err %d\n",
+ irq_first);
+ ret = irq_first;
+ goto out_freeevent_name;
+ }
+
+ irq_last = platform_get_irq_byname(plf, "IRQ_LAST");
+ if (irq_last < 0) {
+ dev_err(&plf->dev, "Last irq not found, err %d\n",
+ irq_last);
+ ret = irq_last;
+ goto out_freeevent_name;
+ }
+
ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
if (!ab8500_dir)
- goto exit_no_debugfs;
+ goto err;
+
+ ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING,
+ ab8500_dir);
+ if (!ab8500_gpadc_dir)
+ goto err;
+
+ file = debugfs_create_file("all-bank-registers", S_IRUGO,
+ ab8500_dir, &plf->dev, &ab8500_registers_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("all-banks", S_IRUGO,
+ ab8500_dir, &plf->dev, &ab8500_all_banks_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-bank", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_bank_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-address", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_address_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("register-value", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_val_fops);
+ if (!file)
+ goto err;
- ab8500_reg_file = debugfs_create_file("all-bank-registers",
- S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
- if (!ab8500_reg_file)
- goto exit_destroy_dir;
+ if (is_ab8500(ab8500))
+ num_interrupt_lines = AB8500_NR_IRQS;
+ else if (is_ab8505(ab8500))
+ num_interrupt_lines = AB8505_NR_IRQS;
+ else if (is_ab9540(ab8500))
+ num_interrupt_lines = AB9540_NR_IRQS;
- ab8500_bank_file = debugfs_create_file("register-bank",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_bank_fops);
- if (!ab8500_bank_file)
- goto exit_destroy_reg;
+ file = debugfs_create_file("interrupts", (S_IRUGO),
+ ab8500_dir, &plf->dev, &ab8500_interrupts_fops);
+ if (!file)
+ goto err;
- ab8500_address_file = debugfs_create_file("register-address",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev,
- &ab8500_address_fops);
- if (!ab8500_address_file)
- goto exit_destroy_bank;
+ file = debugfs_create_file("irq-subscribe", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_subscribe_fops);
+ if (!file)
+ goto err;
- ab8500_val_file = debugfs_create_file("register-value",
- (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_val_fops);
- if (!ab8500_val_file)
- goto exit_destroy_address;
+ file = debugfs_create_file("irq-unsubscribe", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_unsubscribe_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("hwreg", (S_IRUGO | S_IWUGO),
+ ab8500_dir, &plf->dev, &ab8500_hwreg_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bat_ctrl_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_btemp_ball_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect1_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect2_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux1_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux2_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_bat_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("vbus_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_vbus_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_c_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_charger_c_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bk_bat_v_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("die_temp", (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_die_temp_fops);
+ if (!file)
+ goto err;
return 0;
-exit_destroy_address:
- debugfs_remove(ab8500_address_file);
-exit_destroy_bank:
- debugfs_remove(ab8500_bank_file);
-exit_destroy_reg:
- debugfs_remove(ab8500_reg_file);
-exit_destroy_dir:
- debugfs_remove(ab8500_dir);
-exit_no_debugfs:
+err:
+ if (ab8500_dir)
+ debugfs_remove_recursive(ab8500_dir);
dev_err(&plf->dev, "failed to create debugfs entries.\n");
- return -ENOMEM;
+out_freeevent_name:
+ kfree(event_name);
+out_freedev_attr:
+ kfree(dev_attr);
+out_freeirq_count:
+ kfree(irq_count);
+
+ return ret;
}
static int __devexit ab8500_debug_remove(struct platform_device *plf)
{
- debugfs_remove(ab8500_val_file);
- debugfs_remove(ab8500_address_file);
- debugfs_remove(ab8500_bank_file);
- debugfs_remove(ab8500_reg_file);
- debugfs_remove(ab8500_dir);
+ debugfs_remove_recursive(ab8500_dir);
+ kfree(event_name);
+ kfree(dev_attr);
+ kfree(irq_count);
return 0;
}
diff --git a/drivers/mfd/ab8500-denc.c b/drivers/mfd/ab8500-denc.c
new file mode 100644
index 00000000000..17efee62110
--- /dev/null
+++ b/drivers/mfd/ab8500-denc.c
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson AB8500 DENC base driver
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/ab8500/denc-regs.h>
+#include <linux/mfd/ab8500/denc.h>
+
+#define AB8500_NAME "ab8500"
+#define AB8500_DENC_NAME "ab8500_denc"
+
+struct device_usage {
+ struct list_head list;
+ struct platform_device *pdev;
+ bool taken;
+};
+static LIST_HEAD(device_list);
+
+/* To get rid of the extra bank parameter: */
+#define AB8500_REG_BANK_NR(__reg) ((0xff00 & (__reg)) >> 8)
+static inline u8 ab8500_rreg(struct device *dev, u32 reg)
+{
+ u8 val;
+ if (abx500_get_register_interruptible(dev, AB8500_REG_BANK_NR(reg),
+ reg, &val) < 0)
+ return 0;
+ else
+ return val;
+}
+
+static inline int ab8500_wreg(struct device *dev, u32 reg, u8 val)
+{
+ return abx500_set_register_interruptible(dev, AB8500_REG_BANK_NR(reg),
+ reg, val);
+}
+
+/* Only for use in the macro below: */
+static inline int _ab8500_wreg_fld(struct device *dev, u32 reg, u8 val,
+ u8 mask, u8 shift)
+{
+ int ret;
+ u8 org_val;
+
+ ret = abx500_get_register_interruptible(dev, AB8500_REG_BANK_NR(reg),
+ reg, &org_val);
+ if (ret < 0)
+ return ret;
+ else
+ ab8500_wreg(dev, reg,
+ (org_val & ~mask) | ((val << shift) & mask));
+ return 0;
+}
+
+#define ab8500_wr_fld(__d, __reg, __fld, __val) \
+ _ab8500_wreg_fld(__d, __reg, __val, __reg##_##__fld##_MASK, \
+ __reg##_##__fld##_SHIFT)
+
+#define ab8500_set_fld(__cur_val, __reg, __fld, __val) \
+ (((__cur_val) & ~__reg##_##__fld##_MASK) | \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK))
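+
+/*
+ * Expansion example (assuming the usual <reg>_<field>_MASK/_SHIFT
+ * macros from denc-regs.h):
+ *
+ *	ab8500_wr_fld(dev, AB8500_CTRL3, RESET_DENC_N, 1)
+ *
+ * performs a read-modify-write of AB8500_CTRL3 using
+ * AB8500_CTRL3_RESET_DENC_N_MASK and AB8500_CTRL3_RESET_DENC_N_SHIFT.
+ */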
+
+#define AB8500_DENC_TRACE(__pd) dev_dbg(&(__pd)->dev, "%s\n", __func__)
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_ab8500_denc_dir;
+static struct dentry *debugfs_ab8500_dump_regs_file;
+static void ab8500_denc_conf_ddr(struct platform_device *pdev);
+static int debugfs_ab8500_open_file(struct inode *inode, struct file *file);
+static ssize_t debugfs_ab8500_dump_regs(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_ab8500_dump_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_ab8500_open_file,
+ .read = debugfs_ab8500_dump_regs,
+};
+#endif /* CONFIG_DEBUG_FS */
+
+static int __devinit ab8500_denc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab8500_platform_data *ab8500_pdata =
+ dev_get_platdata(pdev->dev.parent);
+ struct ab8500_denc_platform_data *pdata;
+ struct device_usage *device_data;
+
+ AB8500_DENC_TRACE(pdev);
+
+ if (ab8500_pdata == NULL) {
+ dev_err(&pdev->dev, "AB8500 platform data missing\n");
+ return -EINVAL;
+ }
+
+ pdata = ab8500_pdata->denc;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "Denc platform data missing\n");
+ return -EINVAL;
+ }
+
+ device_data = kzalloc(sizeof(struct device_usage), GFP_KERNEL);
+ if (!device_data) {
+ dev_err(&pdev->dev, "Failed to allocate device data\n");
+ return -ENOMEM;
+ }
+ device_data->pdev = pdev;
+ list_add_tail(&device_data->list, &device_list);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_ab8500_denc_dir = debugfs_create_dir(pdev->name, NULL);
+ debugfs_ab8500_dump_regs_file = debugfs_create_file(
+ "dumpregs", S_IRUGO,
+ debugfs_ab8500_denc_dir, &pdev->dev,
+ &debugfs_ab8500_dump_regs_fops
+ );
+#endif /* CONFIG_DEBUG_FS */
+ return ret;
+}
+
+static int __devexit ab8500_denc_remove(struct platform_device *pdev)
+{
+ struct list_head *element;
+ struct device_usage *device_data;
+
+ AB8500_DENC_TRACE(pdev);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(debugfs_ab8500_dump_regs_file);
+ debugfs_remove(debugfs_ab8500_denc_dir);
+#endif /* CONFIG_DEBUG_FS */
+
+ list_for_each(element, &device_list) {
+ device_data = list_entry(element, struct device_usage, list);
+ if (device_data->pdev == pdev) {
+ list_del(element);
+ kzfree(device_data);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver ab8500_denc_driver = {
+ .probe = ab8500_denc_probe,
+ .remove = ab8500_denc_remove,
+ .driver = {
+ .name = "ab8500-denc",
+ },
+};
+
+static void setup_27mhz(struct platform_device *pdev, bool enable)
+{
+ u8 data = ab8500_rreg(&pdev->dev, AB8500_SYS_ULP_CLK_CONF);
+
+ AB8500_DENC_TRACE(pdev);
+ /* TODO: check if this field needs to be set */
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_PD_ENA,
+ true);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_BUF_ENA,
+ enable);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_INV,
+ false);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_DE_IN,
+ false);
+ data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_STRE,
+ 1);
+ ab8500_wreg(&pdev->dev, AB8500_SYS_ULP_CLK_CONF, data);
+
+ data = ab8500_rreg(&pdev->dev, AB8500_SYS_CLK_CTRL);
+ data = ab8500_set_fld(data, AB8500_SYS_CLK_CTRL, TVOUT_CLK_VALID,
+ enable);
+ data = ab8500_set_fld(data, AB8500_SYS_CLK_CTRL, TVOUT_PLL_ENA,
+ enable);
+ ab8500_wreg(&pdev->dev, AB8500_SYS_CLK_CTRL, data);
+}
+
+static u32 map_tv_std(enum ab8500_denc_TV_std std)
+{
+ switch (std) {
+ case TV_STD_PAL_BDGHI:
+ return AB8500_DENC_CONF0_STD_PAL_BDGHI;
+ case TV_STD_PAL_N:
+ return AB8500_DENC_CONF0_STD_PAL_N;
+ case TV_STD_PAL_M:
+ return AB8500_DENC_CONF0_STD_PAL_M;
+ case TV_STD_NTSC_M:
+ return AB8500_DENC_CONF0_STD_NTSC_M;
+ default:
+ return 0;
+ }
+}
+
+static u32 map_cr_filter(enum ab8500_denc_cr_filter_bandwidth bw)
+{
+ switch (bw) {
+ case TV_CR_NTSC_LOW_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_1MHZ;
+ case TV_CR_PAL_LOW_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_3MHZ;
+ case TV_CR_NTSC_HIGH_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_6MHZ;
+ case TV_CR_PAL_HIGH_DEF_FILTER:
+ return AB8500_DENC_CONF1_FLT_1_9MHZ;
+ default:
+ return 0;
+ }
+}
+
+static u32 map_phase_rst_mode(enum ab8500_denc_phase_reset_mode mode)
+{
+ switch (mode) {
+ case TV_PHASE_RST_MOD_DISABLE:
+ return AB8500_DENC_CONF8_PH_RST_MODE_DISABLED;
+ case TV_PHASE_RST_MOD_FROM_PHASE_BUF:
+ return AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_PHASE_BUF;
+ case TV_PHASE_RST_MOD_FROM_INC_DFS:
+ return AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_INC_DFS;
+ case TV_PHASE_RST_MOD_RST:
+ return AB8500_DENC_CONF8_PH_RST_MODE_RESET;
+ default:
+ return 0;
+ }
+}
+
+static u32 map_plug_time(enum ab8500_denc_plug_time time)
+{
+ switch (time) {
+ case TV_PLUG_TIME_0_5S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_0_5S;
+ case TV_PLUG_TIME_1S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_1S;
+ case TV_PLUG_TIME_1_5S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_1_5S;
+ case TV_PLUG_TIME_2S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_2S;
+ case TV_PLUG_TIME_2_5S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_2_5S;
+ case TV_PLUG_TIME_3S:
+ return AB8500_TVOUT_CTRL_PLUG_TV_TIME_3S;
+ default:
+ return 0;
+ }
+}
+
+struct platform_device *ab8500_denc_get_device(void)
+{
+ struct list_head *element;
+ struct device_usage *device_data;
+
+ pr_debug("%s\n", __func__);
+ list_for_each(element, &device_list) {
+ device_data = list_entry(element, struct device_usage, list);
+ if (!device_data->taken) {
+ device_data->taken = true;
+ return device_data->pdev;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(ab8500_denc_get_device);
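+
+/*
+ * Pairing sketch (illustrative): a TV-out driver claims a free DENC
+ * instance and hands it back when done:
+ *
+ *	struct platform_device *pdev = ab8500_denc_get_device();
+ *	if (pdev) {
+ *		ab8500_denc_power_up(pdev);
+ *		...
+ *		ab8500_denc_put_device(pdev);
+ *	}
+ */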
+
+void ab8500_denc_put_device(struct platform_device *pdev)
+{
+ struct list_head *element;
+ struct device_usage *device_data;
+
+ AB8500_DENC_TRACE(pdev);
+ list_for_each(element, &device_list) {
+ device_data = list_entry(element, struct device_usage, list);
+ if (device_data->pdev == pdev)
+ device_data->taken = false;
+ }
+}
+EXPORT_SYMBOL(ab8500_denc_put_device);
+
+void ab8500_denc_reset(struct platform_device *pdev, bool hard)
+{
+ AB8500_DENC_TRACE(pdev);
+ if (hard) {
+ u8 data = ab8500_rreg(&pdev->dev, AB8500_CTRL3);
+ /* reset start */
+ ab8500_wreg(&pdev->dev, AB8500_CTRL3,
+ ab8500_set_fld(data, AB8500_CTRL3, RESET_DENC_N, 0)
+ );
+ /* reset done */
+ ab8500_wreg(&pdev->dev, AB8500_CTRL3,
+ ab8500_set_fld(data, AB8500_CTRL3, RESET_DENC_N, 1)
+ );
+ } else {
+ ab8500_wr_fld(&pdev->dev, AB8500_DENC_CONF6, SOFT_RESET, 1);
+ mdelay(10);
+ }
+}
+EXPORT_SYMBOL(ab8500_denc_reset);
+
+void ab8500_denc_power_up(struct platform_device *pdev)
+{
+ setup_27mhz(pdev, true);
+}
+EXPORT_SYMBOL(ab8500_denc_power_up);
+
+void ab8500_denc_power_down(struct platform_device *pdev)
+{
+ setup_27mhz(pdev, false);
+}
+EXPORT_SYMBOL(ab8500_denc_power_down);
+
+void ab8500_denc_conf(struct platform_device *pdev,
+ struct ab8500_denc_conf *conf)
+{
+ u8 data;
+
+ AB8500_DENC_TRACE(pdev);
+
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF0,
+ AB8500_VAL2REG(AB8500_DENC_CONF0, STD, map_tv_std(conf->TV_std))
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF0, SYNC,
+ conf->test_pattern ? AB8500_DENC_CONF0_SYNC_AUTO_TEST :
+ AB8500_DENC_CONF0_SYNC_F_BASED_SLAVE
+ )
+ );
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF1,
+ AB8500_VAL2REG(AB8500_DENC_CONF1, BLK_LI,
+ !conf->partial_blanking)
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF1, FLT,
+ map_cr_filter(conf->cr_filter))
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF1, CO_KI, conf->suppress_col)
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF1, SETUP_MAIN,
+ conf->black_level_setup)
+ /* TODO: handle cc field: set to 0 now */
+ );
+
+ data = ab8500_rreg(&pdev->dev, AB8500_DENC_CONF2);
+ data = ab8500_set_fld(data, AB8500_DENC_CONF2, N_INTRL,
+ conf->progressive);
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF2, data);
+
+ ab8500_wreg(&pdev->dev, AB8500_DENC_CONF8,
+ AB8500_VAL2REG(AB8500_DENC_CONF8, PH_RST_MODE,
+ map_phase_rst_mode(conf->phase_reset_mode))
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF8, VAL_422_MUX,
+ conf->act_output)
+ |
+ AB8500_VAL2REG(AB8500_DENC_CONF8, BLK_ALL,
+ conf->blank_all)
+ );
+ data = ab8500_rreg(&pdev->dev, AB8500_TVOUT_CTRL);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, DAC_CTRL0,
+ conf->dac_enable);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, DAC_CTRL1,
+ conf->act_dc_output);
+ ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL, data);
+
+ /* no support for DDR in early versions */
+ if (AB8500_REG2VAL(AB8500_REV, FULL_MASK,
+ ab8500_rreg(&pdev->dev, AB8500_REV)) > 0)
+ ab8500_denc_conf_ddr(pdev);
+}
+EXPORT_SYMBOL(ab8500_denc_conf);
+
+void ab8500_denc_conf_plug_detect(struct platform_device *pdev,
+ bool enable, bool load_RC,
+ enum ab8500_denc_plug_time time)
+{
+ u8 data;
+
+ AB8500_DENC_TRACE(pdev);
+ data = ab8500_rreg(&pdev->dev, AB8500_TVOUT_CTRL);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, TV_PLUG_ON, enable);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, TV_LOAD_RC, load_RC);
+ data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, PLUG_TV_TIME,
+ map_plug_time(time));
+ ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL, data);
+}
+EXPORT_SYMBOL(ab8500_denc_conf_plug_detect);
+
+void ab8500_denc_mask_int_plug_det(struct platform_device *pdev, bool plug,
+ bool unplug)
+{
+ u8 data = ab8500_rreg(&pdev->dev, AB8500_IT_MASK1);
+
+ AB8500_DENC_TRACE(pdev);
+ data = ab8500_set_fld(data, AB8500_IT_MASK1, PLUG_TV_DET, plug);
+ data = ab8500_set_fld(data, AB8500_IT_MASK1, UNPLUG_TV_DET, unplug);
+ ab8500_wreg(&pdev->dev, AB8500_IT_MASK1, data);
+}
+EXPORT_SYMBOL(ab8500_denc_mask_int_plug_det);
+
+static void ab8500_denc_conf_ddr(struct platform_device *pdev)
+{
+ struct ab8500_platform_data *core_pdata;
+ struct ab8500_denc_platform_data *denc_pdata;
+
+ AB8500_DENC_TRACE(pdev);
+ core_pdata = dev_get_platdata(pdev->dev.parent);
+ denc_pdata = core_pdata->denc;
+ ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL2,
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL2,
+ DENC_DDR, denc_pdata->ddr_enable) |
+ AB8500_VAL2REG(AB8500_TVOUT_CTRL2, SWAP_DDR_DATA_IN,
+ denc_pdata->ddr_little_endian));
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int debugfs_ab8500_open_file(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUG_BUF_SIZE 900
+
+#define AB8500_GPIO_DIR5 0x1014
+#define AB8500_GPIO_DIR5_35_SHIFT 2
+#define AB8500_GPIO_DIR5_35_MASK (1 << AB8500_GPIO_DIR5_35_SHIFT)
+#define AB8500_GPIO_OUT5 0x1024
+#define AB8500_GPIO_OUT5_35_SHIFT 2
+#define AB8500_GPIO_OUT5_35_MASK (1 << AB8500_GPIO_OUT5_35_SHIFT)
+#define AB8500_GPIO_OUT5_35_VIDEO 0
+#define AB8500_GPIO_OUT5_35_AUDIO 1
+#define AB8500_GPIO_NPUD5 0x1034
+#define AB8500_GPIO_NPUD5_35_SHIFT 2
+#define AB8500_GPIO_NPUD5_35_MASK (1 << AB8500_GPIO_NPUD5_35_SHIFT)
+#define AB8500_GPIO_NPUD5_35_ACTIVE 0
+#define AB8500_GPIO_NPUD5_35_INACTIVE 1
+
+static ssize_t debugfs_ab8500_dump_regs(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ int ret = 0;
+ size_t data_size = 0;
+ char buffer[DEBUG_BUF_SIZE];
+ struct device *dev = file->private_data;
+
+ data_size += sprintf(buffer + data_size,
+ "AB8500 DENC registers:\n"
+ "------Regulators etc ----------\n"
+ "CTRL3 : 0x%04x = 0x%02x\n"
+ "SYSULPCLK_CONF: 0x%04x = 0x%02x\n"
+ "SYSCLK_CTRL : 0x%04x = 0x%02x\n"
+ "REGU_MISC1 : 0x%04x = 0x%02x\n"
+ "VAUX12_REGU : 0x%04x = 0x%02x\n"
+ "VAUX1_SEL1 : 0x%04x = 0x%02x\n"
+ "------TVout only --------------\n"
+ "DENC_CONF0 : 0x%04x = 0x%02x\n"
+ "DENC_CONF1 : 0x%04x = 0x%02x\n"
+ "DENC_CONF2 : 0x%04x = 0x%02x\n"
+ "DENC_CONF6 : 0x%04x = 0x%02x\n"
+ "DENC_CONF8 : 0x%04x = 0x%02x\n"
+ "TVOUT_CTRL : 0x%04x = 0x%02x\n"
+ "TVOUT_CTRL2 : 0x%04x = 0x%02x\n"
+ "IT_MASK1 : 0x%04x = 0x%02x\n"
+ "------AV connector-------------\n"
+ "GPIO_DIR5 : 0x%04x = 0x%02x\n"
+ "GPIO_OUT5 : 0x%04x = 0x%02x\n"
+ "GPIO_NPUD5 : 0x%04x = 0x%02x\n"
+ ,
+ AB8500_CTRL3, ab8500_rreg(dev, AB8500_CTRL3),
+ AB8500_SYS_ULP_CLK_CONF, ab8500_rreg(dev,
+ AB8500_SYS_ULP_CLK_CONF),
+ AB8500_SYS_CLK_CTRL, ab8500_rreg(dev, AB8500_SYS_CLK_CTRL),
+ AB8500_REGU_MISC1, ab8500_rreg(dev, AB8500_REGU_MISC1),
+ AB8500_VAUX12_REGU, ab8500_rreg(dev, AB8500_VAUX12_REGU),
+ AB8500_VAUX1_SEL, ab8500_rreg(dev, AB8500_VAUX1_SEL),
+ AB8500_DENC_CONF0, ab8500_rreg(dev, AB8500_DENC_CONF0),
+ AB8500_DENC_CONF1, ab8500_rreg(dev, AB8500_DENC_CONF1),
+ AB8500_DENC_CONF2, ab8500_rreg(dev, AB8500_DENC_CONF2),
+ AB8500_DENC_CONF6, ab8500_rreg(dev, AB8500_DENC_CONF6),
+ AB8500_DENC_CONF8, ab8500_rreg(dev, AB8500_DENC_CONF8),
+ AB8500_TVOUT_CTRL, ab8500_rreg(dev, AB8500_TVOUT_CTRL),
+ AB8500_TVOUT_CTRL2, ab8500_rreg(dev, AB8500_TVOUT_CTRL2),
+ AB8500_IT_MASK1, ab8500_rreg(dev, AB8500_IT_MASK1),
+ AB8500_GPIO_DIR5, ab8500_rreg(dev, AB8500_GPIO_DIR5),
+ AB8500_GPIO_OUT5, ab8500_rreg(dev, AB8500_GPIO_OUT5),
+ AB8500_GPIO_NPUD5, ab8500_rreg(dev, AB8500_GPIO_NPUD5)
+ );
+ if (data_size >= DEBUG_BUF_SIZE) {
+ printk(KERN_EMERG "AB8500 DENC: Buffer overrun\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* check if read done */
+ if (*f_pos > data_size)
+ goto out;
+
+ if (*f_pos + count > data_size)
+ count = data_size - *f_pos;
+
+ if (copy_to_user(buf, buffer + *f_pos, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+out:
+ return ret;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/* Module init */
+static int __init ab8500_denc_init(void)
+{
+ return platform_driver_register(&ab8500_denc_driver);
+}
+module_init(ab8500_denc_init);
+
+static void __exit ab8500_denc_exit(void)
+{
+ platform_driver_unregister(&ab8500_denc_driver);
+}
+module_exit(ab8500_denc_exit);
+
+MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson AB8500 DENC driver");
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index c39fc716e1d..d06f4826619 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
@@ -82,6 +83,11 @@
/* This is used to not lose precision when dividing to get gain and offset */
#define CALIB_SCALE 1000
+/* Time in ms before disabling regulator */
+#define GPADC_AUTOSUSPEND_DELAY 1
+
+#define CONVERSION_TIME 500 /* ms */
+
enum cal_channels {
ADC_INPUT_VMAIN = 0,
ADC_INPUT_BTEMP,
@@ -102,10 +108,10 @@ struct adc_cal_data {
/**
* struct ab8500_gpadc - AB8500 GPADC device information
- * @chip_id ABB chip id
* @dev: pointer to the struct device
* @node: a list of AB8500 GPADCs, hence prepared for
reentrance
+ * @parent: pointer to the struct ab8500
* @ab8500_gpadc_complete: pointer to the struct completion, to indicate
* the completion of gpadc conversion
* @ab8500_gpadc_lock: structure of type mutex
@@ -114,9 +120,9 @@ struct adc_cal_data {
* @cal_data array of ADC calibration data structs
*/
struct ab8500_gpadc {
- u8 chip_id;
struct device *dev;
struct list_head node;
+ struct ab8500 *parent;
struct completion ab8500_gpadc_complete;
struct mutex ab8500_gpadc_lock;
struct regulator *regu;
@@ -282,8 +288,9 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
return -ENODEV;
mutex_lock(&gpadc->ab8500_gpadc_lock);
+
/* Enable VTVout LDO this is required for GPADC */
- regulator_enable(gpadc->regu);
+ pm_runtime_get_sync(gpadc->dev);
/* Check if ADC is not busy, lock and proceed */
do {
@@ -332,7 +339,7 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
EN_BUF | EN_ICHAR);
break;
case BTEMP_BALL:
- if (gpadc->chip_id >= AB8500_CUT3P0) {
+ if (!is_ab8500_2p0_or_earlier(gpadc->parent)) {
/* Turn on btemp pull-up on ABB 3.0 */
ret = abx500_mask_and_set_register_interruptible(
gpadc->dev,
@@ -344,7 +351,7 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
* Delay might be needed for ABB8500 cut 3.0, if not, remove
* when hardware will be availible
*/
- msleep(1);
+ mdelay(1);
break;
}
/* Intentional fallthrough */
@@ -367,7 +374,8 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
goto out;
}
/* wait for completion of conversion */
- if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete, 2*HZ)) {
+ if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete,
+ msecs_to_jiffies(CONVERSION_TIME))) {
dev_err(gpadc->dev,
"timeout: didn't receive GPADC conversion interrupt\n");
ret = -EINVAL;
@@ -397,8 +405,10 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n");
goto out;
}
- /* Disable VTVout LDO this is required for GPADC */
- regulator_disable(gpadc->regu);
+
+ pm_runtime_mark_last_busy(gpadc->dev);
+ pm_runtime_put_autosuspend(gpadc->dev);
+
mutex_unlock(&gpadc->ab8500_gpadc_lock);
return (high_data << 8) | low_data;
@@ -412,7 +422,9 @@ out:
*/
(void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
AB8500_GPADC_CTRL1_REG, DIS_GPADC);
- regulator_disable(gpadc->regu);
+
+ pm_runtime_put(gpadc->dev);
+
mutex_unlock(&gpadc->ab8500_gpadc_lock);
dev_err(gpadc->dev,
"gpadc_conversion: Failed to AD convert channel %d\n", channel);
@@ -571,6 +583,28 @@ static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
gpadc->cal_data[ADC_INPUT_VBAT].offset);
}
+static int ab8500_gpadc_runtime_suspend(struct device *dev)
+{
+ struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+
+ regulator_disable(gpadc->regu);
+ return 0;
+}
+
+static int ab8500_gpadc_runtime_resume(struct device *dev)
+{
+ struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+
+ regulator_enable(gpadc->regu);
+ return 0;
+}
+
+static int ab8500_gpadc_runtime_idle(struct device *dev)
+{
+ pm_runtime_suspend(dev);
+ return 0;
+}
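+
+/*
+ * Runtime PM flow: ab8500_gpadc_read_raw() brackets each conversion
+ * with pm_runtime_get_sync()/pm_runtime_put_autosuspend(), so the
+ * VTVout regulator is enabled via the resume callback and disabled
+ * again roughly GPADC_AUTOSUSPEND_DELAY ms after the last conversion.
+ */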
+
static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -591,6 +625,7 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
}
gpadc->dev = &pdev->dev;
+ gpadc->parent = dev_get_drvdata(pdev->dev.parent);
mutex_init(&gpadc->ab8500_gpadc_lock);
/* Initialize completion used to notify completion of conversion */
@@ -606,14 +641,6 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
goto fail;
}
- /* Get Chip ID of the ABB ASIC */
- ret = abx500_get_chip_id(gpadc->dev);
- if (ret < 0) {
- dev_err(gpadc->dev, "failed to get chip ID\n");
- goto fail_irq;
- }
- gpadc->chip_id = (u8) ret;
-
/* VTVout LDO used to power up ab8500-GPADC */
gpadc->regu = regulator_get(&pdev->dev, "vddadc");
if (IS_ERR(gpadc->regu)) {
@@ -621,6 +648,16 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
dev_err(gpadc->dev, "failed to get vtvout LDO\n");
goto fail_irq;
}
+
+ platform_set_drvdata(pdev, gpadc);
+
+ regulator_enable(gpadc->regu);
+
+ pm_runtime_set_autosuspend_delay(gpadc->dev, GPADC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(gpadc->dev);
+ pm_runtime_set_active(gpadc->dev);
+ pm_runtime_enable(gpadc->dev);
+
ab8500_gpadc_read_calibration_data(gpadc);
list_add_tail(&gpadc->node, &ab8500_gpadc_list);
dev_dbg(gpadc->dev, "probe success\n");
@@ -641,19 +678,34 @@ static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
list_del(&gpadc->node);
/* remove interrupt - completion of Sw ADC conversion */
free_irq(gpadc->irq, gpadc);
- /* disable VTVout LDO that is being used by GPADC */
- regulator_put(gpadc->regu);
+
+ pm_runtime_get_sync(gpadc->dev);
+ pm_runtime_disable(gpadc->dev);
+
+ regulator_disable(gpadc->regu);
+
+ pm_runtime_set_suspended(gpadc->dev);
+
+ pm_runtime_put_noidle(gpadc->dev);
+
kfree(gpadc);
gpadc = NULL;
return 0;
}
+static const struct dev_pm_ops ab8500_gpadc_pm_ops = {
+ SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend,
+ ab8500_gpadc_runtime_resume,
+ ab8500_gpadc_runtime_idle)
+};
+
static struct platform_driver ab8500_gpadc_driver = {
.probe = ab8500_gpadc_probe,
.remove = __devexit_p(ab8500_gpadc_remove),
.driver = {
.name = "ab8500-gpadc",
.owner = THIS_MODULE,
+ .pm = &ab8500_gpadc_pm_ops,
},
};
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
index b83045f102b..5ee90fd125e 100644
--- a/drivers/mfd/ab8500-i2c.c
+++ b/drivers/mfd/ab8500-i2c.c
@@ -13,6 +13,7 @@
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/dbx500-prcmu.h>
+
static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
{
int ret;
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index c28d4eb1eff..d5865d41514 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -7,12 +7,114 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reboot.h>
+#include <linux/signal.h>
+#include <linux/power_supply.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-sysctrl.h>
+#include <linux/time.h>
+#include <linux/hwmon.h>
static struct device *sysctrl_dev;
+void ab8500_power_off(void)
+{
+ struct ab8500_platform_data *plat;
+ struct timespec ts;
+ sigset_t old;
+ sigset_t all;
+ static char *pss[] = {"ab8500_ac", "ab8500_usb"};
+ int i;
+ bool charger_present = false;
+ union power_supply_propval val;
+ struct power_supply *psy;
+ int ret;
+
+ /*
+ * If we have a charger connected and we're powering off,
+ * reboot into charge-only mode.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(pss); i++) {
+ psy = power_supply_get_by_name(pss[i]);
+ if (!psy)
+ continue;
+
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
+
+ if (!ret && val.intval) {
+ charger_present = true;
+ break;
+ }
+ }
+
+ if (!charger_present)
+ goto shutdown;
+
+ /* Check if battery is known */
+ psy = power_supply_get_by_name("ab8500_btemp");
+ if (psy) {
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_TECHNOLOGY,
+ &val);
+ if (!ret && val.intval != POWER_SUPPLY_TECHNOLOGY_UNKNOWN) {
+ printk(KERN_INFO
+ "Charger \"%s\" is connected with known battery."
+ " Rebooting.\n",
+ pss[i]);
+ machine_restart("charging");
+ }
+ }
+
+shutdown:
+ sigfillset(&all);
+
+ plat = dev_get_platdata(sysctrl_dev->parent);
+ getnstimeofday(&ts);
+ if (!sigprocmask(SIG_BLOCK, &all, &old)) {
+ if (ts.tv_sec == 0 ||
+ (ts.tv_sec - plat->thermal_set_time_sec >
+ plat->thermal_time_out))
+ plat->thermal_power_off_pending = false;
+ if (!plat->thermal_power_off_pending) {
+ (void)ab8500_sysctrl_set(AB8500_STW4500CTRL1,
+ AB8500_STW4500CTRL1_SWOFF |
+ AB8500_STW4500CTRL1_SWRESET4500N);
+ (void)sigprocmask(SIG_SETMASK, &old, NULL);
+ } else {
+ (void)ab8500_sysctrl_set(AB8500_STW4500CTRL1,
+ AB8500_STW4500CTRL1_THDB8500SWOFF |
+ AB8500_STW4500CTRL1_SWRESET4500N);
+ (void)sigprocmask(SIG_SETMASK, &old, NULL);
+ }
+ }
+}
+
+static int ab8500_notifier_call(struct notifier_block *this,
+ unsigned long val, void *data)
+{
+ struct ab8500_platform_data *plat;
+ static struct timespec ts;
+ if (sysctrl_dev == NULL)
+ return -EAGAIN;
+
+ plat = dev_get_platdata(sysctrl_dev->parent);
+ if (val) {
+ getnstimeofday(&ts);
+ plat->thermal_set_time_sec = ts.tv_sec;
+ plat->thermal_power_off_pending = true;
+ } else {
+ plat->thermal_set_time_sec = 0;
+ plat->thermal_power_off_pending = false;
+ }
+ return 0;
+}
+
+static struct notifier_block ab8500_notifier = {
+ .notifier_call = ab8500_notifier_call,
+};
+
static inline bool valid_bank(u8 bank)
{
return ((bank == AB8500_SYS_CTRL1_BLOCK) ||
@@ -33,6 +135,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value)
return abx500_get_register_interruptible(sysctrl_dev, bank,
(u8)(reg & 0xFF), value);
}
+EXPORT_SYMBOL(ab8500_sysctrl_read);
int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
{
@@ -48,10 +151,42 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
(u8)(reg & 0xFF), mask, value);
}
+EXPORT_SYMBOL(ab8500_sysctrl_write);
static int __devinit ab8500_sysctrl_probe(struct platform_device *pdev)
{
+ struct ab8500_platform_data *plat;
+ struct ab8500_sysctrl_platform_data *pdata;
+
sysctrl_dev = &pdev->dev;
+ plat = dev_get_platdata(pdev->dev.parent);
+ if (plat->pm_power_off)
+ pm_power_off = ab8500_power_off;
+ hwmon_notifier_register(&ab8500_notifier);
+
+ pdata = plat->sysctrl;
+
+ if (pdata) {
+ int ret;
+ int i;
+ int j;
+ for (i = AB8500_SYSCLKREQ1RFCLKBUF;
+ i <= AB8500_SYSCLKREQ8RFCLKBUF; i++) {
+ j = i - AB8500_SYSCLKREQ1RFCLKBUF;
+ ret = ab8500_sysctrl_write(i, 0xff,
+ pdata->initial_req_buf_config[j]);
+ dev_dbg(&pdev->dev,
+ "Setting SysClkReq%dRfClkBuf 0x%X\n",
+ j + 1,
+ pdata->initial_req_buf_config[j]);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "unable to set sysClkReq%dRfClkBuf: "
+ "%d\n", j + 1, ret);
+ }
+ }
+ }
+
return 0;
}
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
index 7ce65f49480..9818afba251 100644
--- a/drivers/mfd/abx500-core.c
+++ b/drivers/mfd/abx500-core.c
@@ -153,6 +153,22 @@ int abx500_startup_irq_enabled(struct device *dev, unsigned int irq)
}
EXPORT_SYMBOL(abx500_startup_irq_enabled);
+void abx500_dump_all_banks(void)
+{
+ struct abx500_ops *ops;
+ struct device dummy_child = {0};
+ struct abx500_device_entry *dev_entry;
+
+ list_for_each_entry(dev_entry, &abx500_list, list) {
+ dummy_child.parent = dev_entry->dev;
+ ops = &dev_entry->ops;
+
+ if ((ops != NULL) && (ops->dump_all_banks != NULL))
+ ops->dump_all_banks(&dummy_child);
+ }
+}
+EXPORT_SYMBOL(abx500_dump_all_banks);
+
MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
MODULE_DESCRIPTION("ABX500 core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/db5500-prcmu-regs.h b/drivers/mfd/db5500-prcmu-regs.h
new file mode 100644
index 00000000000..0428b5e95ae
--- /dev/null
+++ b/drivers/mfd/db5500-prcmu-regs.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#ifndef __MACH_PRCMU_REGS_DB5500_H
+#define __MACH_PRCMU_REGS_DB5500_H
+
+#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))
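+
+/* Examples: BITS(0, 7) == 0xff, BITS(4, 13) == 0x3ff0. */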
+
+#define PRCM_TCR 0x1C8
+#define PRCM_TCR_TENSEL_MASK BITS(0, 7)
+#define PRCM_TCR_STOP_TIMERS BIT(16)
+#define PRCM_TCR_DOZE_MODE BIT(17)
+
+/* PRCMU HW semaphore */
+#define PRCM_SEM 0x400
+#define PRCM_SEM_PRCM_SEM BIT(0)
+
+#define DB5500_PRCM_ACLK_MGT 0x004
+#define DB5500_PRCM_SVACLK_MGT 0x008
+#define DB5500_PRCM_SIACLK_MGT 0x00C
+#define DB5500_PRCM_SGACLK_MGT 0x014
+#define DB5500_PRCM_UARTCLK_MGT 0x018
+#define DB5500_PRCM_MSP02CLK_MGT 0x01C
+#define DB5500_PRCM_I2CCLK_MGT 0x020
+#define DB5500_PRCM_SDMMCCLK_MGT 0x024
+#define DB5500_PRCM_PER1CLK_MGT 0x02C
+#define DB5500_PRCM_PER2CLK_MGT 0x030
+#define DB5500_PRCM_PER3CLK_MGT 0x034
+#define DB5500_PRCM_PER5CLK_MGT 0x038
+#define DB5500_PRCM_PER6CLK_MGT 0x03C
+#define DB5500_PRCM_IRDACLK_MGT 0x040
+#define DB5500_PRCM_PWMCLK_MGT 0x044
+#define DB5500_PRCM_SPARE1CLK_MGT 0x048
+#define DB5500_PRCM_IRRCCLK_MGT 0x04C
+#define DB5500_PRCM_HDMICLK_MGT 0x058
+#define DB5500_PRCM_APEATCLK_MGT 0x05C
+#define DB5500_PRCM_APETRACECLK_MGT 0x060
+#define DB5500_PRCM_MCDECLK_MGT 0x064
+#define DB5500_PRCM_DSIALTCLK_MGT 0x06C
+#define DB5500_PRCM_DMACLK_MGT 0x074
+#define DB5500_PRCM_B2R2CLK_MGT 0x078
+#define DB5500_PRCM_TVCLK_MGT 0x07C
+#define DB5500_PRCM_RNGCLK_MGT 0x284
+
+#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4)
+#define PRCM_CLK_MGT_CLKPLLDIV_SHIFT 0
+#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7)
+#define PRCM_CLK_MGT_CLKEN BIT(8)
+
+#define PRCM_ARM_IT1_CLEAR 0x48C
+#define PRCM_ARM_IT1_VAL 0x494
+
+/* CPU mailbox registers */
+#define PRCM_MBOX_CPU_VAL 0x0FC
+#define PRCM_MBOX_CPU_SET 0x100
+
+/* System reset register */
+#define PRCM_APE_SOFTRST 0x228
+
+/* PRCMU clock/PLL/reset registers */
+#define PRCM_PLLDSI_FREQ 0x500
+#define PRCM_PLLDSI_ENABLE 0x504
+#define PRCM_PLLDSI_LOCKP 0x508
+#define PRCM_DSI_PLLOUT_SEL 0x530
+#define PRCM_DSITVCLK_DIV 0x52C
+#define PRCM_APE_RESETN_SET 0x1E4
+#define PRCM_APE_RESETN_CLR 0x1E8
+
+/* CLKOUTx SEL0 settings */
+#define CLKOUT_SEL0_REF_CLK 0x01 /* 0b 0001 */
+#define CLKOUT_SEL0_RTC_CLK0 0x02 /* 0b 0010 */
+#define CLKOUT_SEL0_ULP_CLK 0x04 /* 0b 0100 */
+#define CLKOUT_SEL0_SEL_CLK 0x08 /* 0b 1000 */
+
+/* CLKOUTx SEL settings */
+#define CLKOUT_SEL_STATIC0 0x0001 /* 0b 00 0000 0001 */
+#define CLKOUT_SEL_REFCLK 0x0002 /* 0b 00 0000 0010 */
+#define CLKOUT_SEL_ULPCLK 0x0004 /* 0b 00 0000 0100 */
+#define CLKOUT_SEL_ARMCLK 0x0008 /* 0b 00 0000 1000 */
+#define CLKOUT_SEL_SYSACC0CLK 0x0010 /* 0b 00 0001 0000 */
+#define CLKOUT_SEL_SOC0PLLCLK 0x0020 /* 0b 00 0010 0000 */
+#define CLKOUT_SEL_SOC1PLLCLK 0x0040 /* 0b 00 0100 0000 */
+#define CLKOUT_SEL_DDRPLLCLK 0x0080 /* 0b 00 1000 0000 */
+#define CLKOUT_SEL_TVCLK 0x0100 /* 0b 01 0000 0000 */
+#define CLKOUT_SEL_IRDACLK 0x0200 /* 0b 10 0000 0000 */
+
+/* CLKOUTx dividers */
+#define CLKOUT_DIV_2 0x00 /* 0b 000 */
+#define CLKOUT_DIV_4 0x01 /* 0b 001 */
+#define CLKOUT_DIV_8 0x02 /* 0b 010 */
+#define CLKOUT_DIV_16 0x03 /* 0b 011 */
+#define CLKOUT_DIV_32 0x04 /* 0b 100 */
+#define CLKOUT_DIV_64 0x05 /* 0b 101 */
+/* Values 0x06 and 0x07 will also set the CLKOUTx divider to 64. */
+
+/* PRCM_CLKOCR CLKOUTx Control registers */
+#define PRCM_CLKOCR 0x1CC
+#define PRCM_CLKOCR_CLKOUT0_SEL0_SHIFT 0
+#define PRCM_CLKOCR_CLKOUT0_SEL0_MASK BITS(0, 3)
+#define PRCM_CLKOCR_CLKOUT0_SEL_SHIFT 4
+#define PRCM_CLKOCR_CLKOUT0_SEL_MASK BITS(4, 13)
+#define PRCM_CLKOCR_CLKOUT1_SEL0_SHIFT 16
+#define PRCM_CLKOCR_CLKOUT1_SEL0_MASK BITS(16, 19)
+#define PRCM_CLKOCR_CLKOUT1_SEL_SHIFT 20
+#define PRCM_CLKOCR_CLKOUT1_SEL_MASK BITS(20, 29)
+
+/* PRCM_CLKODIV CLKOUTx Dividers */
+#define PRCM_CLKODIV 0x188
+#define PRCM_CLKODIV_CLKOUT0_DIV_SHIFT 0
+#define PRCM_CLKODIV_CLKOUT0_DIV_MASK BITS(0, 2)
+#define PRCM_CLKODIV_CLKOUT1_DIV_SHIFT 16
+#define PRCM_CLKODIV_CLKOUT1_DIV_MASK BITS(16, 18)
+
+#define PRCM_MMIP_LS_CLAMP_SET 0x420
+#define PRCM_MMIP_LS_CLAMP_CLR 0x424
+#define PRCM_DDR_SUBSYS_APE_MINBW 0x438
+
+/* Miscellaneous unit registers */
+#define PRCM_DSI_SW_RESET 0x324
+#define PRCM_RESOUTN_SET_OFFSET 0x214
+#define PRCM_RESOUTN_CLR_OFFSET 0x218
+
+/* APE - Modem Registers */
+#define PRCM_HOSTACCESS_REQ 0x334
+/* APE - Modem register bit manipulation */
+#define PRCM_HOSTACCESS_REQ_BIT BIT(0)
+#define PRCM_APE_ACK 0x49c
+#define PRCM_APE_ACK_BIT 0x01
+
+/* Watchdog - mtimer registers */
+#define PRCM_TIMER0_RTOS_COMP1_OFFSET 0x4C
+#define PRCM_TIMER0_RTOS_COUNTER_OFFSET 0x40
+#define PRCM_TIMER0_IRQ_EN_SET_OFFSET 0x70
+#define PRCM_TIMER0_IRQ_EN_CLR_OFFSET 0x6C
+#define PRCM_TIMER0_IRQ_RTOS1_SET 0x08
+#define PRCM_TIMER0_IRQ_RTOS1_CLR 0x08
+
+#endif
diff --git a/drivers/mfd/db5500-prcmu.c b/drivers/mfd/db5500-prcmu.c
index bb115b2f04e..b106632d03c 100644
--- a/drivers/mfd/db5500-prcmu.c
+++ b/drivers/mfd/db5500-prcmu.c
@@ -19,12 +19,21 @@
#include <linux/irq.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/regulator/db5500-prcmu.h>
+#include <linux/regulator/machine.h>
#include <linux/interrupt.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/db5500-regs.h>
-#include "dbx500-prcmu-regs.h"
+#include <mach/prcmu-debug.h>
+
+#include "db5500-prcmu-regs.h"
+
+#define PRCMU_FW_VERSION_OFFSET 0xA4
+#define PRCM_SW_RST_REASON (tcdm_base + 0xFF8) /* 2 bytes */
#define _PRCM_MB_HEADER (tcdm_base + 0xFE8)
#define PRCM_REQ_MB0_HEADER (_PRCM_MB_HEADER + 0x0)
@@ -64,6 +73,52 @@
#define PRCM_ACK_MB6 (tcdm_base + 0xF0C)
#define PRCM_ACK_MB7 (tcdm_base + 0xF08)
+/* Share info */
+#define PRCM_SHARE_INFO (tcdm_base + 0xEC8)
+
+#define PRCM_SHARE_INFO_HOTDOG (PRCM_SHARE_INFO + 62)
+
+/* Mailbox 0 REQs */
+#define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0)
+#define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x1)
+#define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x2)
+#define PRCM_REQ_MB0_DDR_STATE (PRCM_REQ_MB0 + 0x3)
+#define PRCM_REQ_MB0_ESRAM0_STATE (PRCM_REQ_MB0 + 0x4)
+#define PRCM_REQ_MB0_WAKEUP_DBB (PRCM_REQ_MB0 + 0x8)
+#define PRCM_REQ_MB0_WAKEUP_ABB (PRCM_REQ_MB0 + 0xC)
+
+/* Mailbox 0 ACKs */
+#define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0)
+#define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1)
+#define PRCM_ACK_MB0_WAKEUP_0_DBB (PRCM_ACK_MB0 + 0x4)
+#define PRCM_ACK_MB0_WAKEUP_0_ABB (PRCM_ACK_MB0 + 0x8)
+#define PRCM_ACK_MB0_WAKEUP_1_DBB (PRCM_ACK_MB0 + 0x28)
+#define PRCM_ACK_MB0_WAKEUP_1_ABB (PRCM_ACK_MB0 + 0x2C)
+#define PRCM_ACK_MB0_EVENT_ABB_NUMBERS 20
+
+/* Request mailbox 1 fields. */
+#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0)
+#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1)
+
+/* Mailbox 1 ACKs */
+#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0)
+#define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1)
+#define PRCM_ACK_MB1_ARM_VOLT_STATUS (PRCM_ACK_MB1 + 0x2)
+#define PRCM_ACK_MB1_APE_VOLT_STATUS (PRCM_ACK_MB1 + 0x3)
+
+/* Mailbox 2 REQs */
+#define PRCM_REQ_MB2_EPOD_CLIENT (PRCM_REQ_MB2 + 0x0)
+#define PRCM_REQ_MB2_EPOD_STATE (PRCM_REQ_MB2 + 0x1)
+#define PRCM_REQ_MB2_CLK_CLIENT (PRCM_REQ_MB2 + 0x2)
+#define PRCM_REQ_MB2_CLK_STATE (PRCM_REQ_MB2 + 0x3)
+#define PRCM_REQ_MB2_PLL_CLIENT (PRCM_REQ_MB2 + 0x4)
+#define PRCM_REQ_MB2_PLL_STATE (PRCM_REQ_MB2 + 0x5)
+
+/* Mailbox 2 ACKs */
+#define PRCM_ACK_MB2_EPOD_STATUS (PRCM_ACK_MB2 + 0x2)
+#define PRCM_ACK_MB2_CLK_STATUS (PRCM_ACK_MB2 + 0x6)
+#define PRCM_ACK_MB2_PLL_STATUS (PRCM_ACK_MB2 + 0xA)
+
enum mb_return_code {
RC_SUCCESS,
RC_FAIL,
@@ -71,12 +126,58 @@ enum mb_return_code {
/* Mailbox 0 headers. */
enum mb0_header {
- /* request */
- RMB0H_PWR_STATE_TRANS = 1,
- RMB0H_WAKE_UP_CFG,
- RMB0H_RD_WAKE_UP_ACK,
/* acknowledge */
- AMB0H_WAKE_UP = 1,
+ MB0H_WAKE_UP = 0,
+ /* request */
+ MB0H_PWR_STATE_TRANS,
+ MB0H_WAKE_UP_CFG,
+ MB0H_RD_WAKE_UP_ACK,
+};
+
+/* Mailbox 1 headers.*/
+enum mb1_header {
+ MB1H_ARM_OPP = 1,
+ MB1H_APE_OPP,
+ MB1H_ARM_APE_OPP,
+};
+
+/* Mailbox 2 headers. */
+enum mb2_header {
+ MB2H_EPOD_REQUEST = 1,
+ MB2H_CLK_REQUEST,
+ MB2H_PLL_REQUEST,
+};
+
+/* Mailbox 3 headers. */
+enum mb3_header {
+ MB3H_REFCLK_REQUEST = 1,
+};
+
+enum sysclk_state {
+ SYSCLK_OFF,
+ SYSCLK_ON,
+};
+
+/* Mailbox 4 headers */
+enum mb4_header {
+ MB4H_CFG_HOTDOG = 7,
+ MB4H_CFG_HOTMON = 8,
+ MB4H_CFG_HOTPERIOD = 10,
+ MB4H_CGF_MODEM_RESET = 13,
+ MB4H_CGF_A9WDOG_EN_PREBARK = 14,
+ MB4H_CGF_A9WDOG_EN_NOPREBARK = 15,
+ MB4H_CGF_A9WDOG_DIS = 16,
+};
+
+/* Mailbox 4 ACK headers */
+enum mb4_ack_header {
+ MB4H_ACK_CFG_HOTDOG = 5,
+ MB4H_ACK_CFG_HOTMON = 6,
+ MB4H_ACK_CFG_HOTPERIOD = 8,
+ MB4H_ACK_CFG_MODEM_RESET = 11,
+ MB4H_ACK_CGF_A9WDOG_EN_PREBARK = 12,
+ MB4H_ACK_CGF_A9WDOG_EN_NOPREBARK = 13,
+ MB4H_ACK_CGF_A9WDOG_DIS = 14,
};
/* Mailbox 5 headers. */
@@ -85,6 +186,69 @@ enum mb5_header {
MB5H_I2C_READ,
};
+enum db5500_arm_opp {
+ DB5500_ARM_100_OPP = 1,
+ DB5500_ARM_50_OPP,
+ DB5500_ARM_EXT_OPP,
+};
+
+enum db5500_ape_opp {
+ DB5500_APE_100_OPP = 1,
+ DB5500_APE_50_OPP
+};
+
+enum epod_state {
+ EPOD_OFF,
+ EPOD_ON,
+};
+enum epod_onoffret_state {
+ EPOD_OOR_OFF,
+ EPOD_OOR_RET,
+ EPOD_OOR_ON,
+};
+enum db5500_prcmu_pll {
+ DB5500_PLL_SOC0,
+ DB5500_PLL_SOC1,
+ DB5500_PLL_DDR,
+ DB5500_NUM_PLL_ID,
+};
+
+enum db5500_prcmu_clk {
+ DB5500_MSP1CLK,
+ DB5500_CDCLK,
+ DB5500_IRDACLK,
+ DB5500_TVCLK,
+ DB5500_NUM_CLK_CLIENTS,
+};
+
+enum on_off_ret {
+ OFF_ST,
+ RET_ST,
+ ON_ST,
+};
+
+enum db5500_ap_pwr_state {
+ DB5500_AP_SLEEP = 2,
+ DB5500_AP_DEEP_SLEEP,
+ DB5500_AP_IDLE,
+};
+
+/* Request mailbox 3 fields */
+#define PRCM_REQ_MB3_REFCLK_MGT (PRCM_REQ_MB3 + 0x0)
+
+/* Ack. mailbox 3 fields */
+#define PRCM_ACK_MB3_REFCLK_REQ (PRCM_ACK_MB3 + 0x0)
+
+
+/* Request mailbox 4 fields */
+#define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 32)
+#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 34)
+#define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 36)
+#define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 38)
+
+/* Ack. mailbox 4 field */
+#define PRCM_ACK_MB4_REQUESTS (PRCM_ACK_MB4 + 0x0)
+
/* Request mailbox 5 fields. */
#define PRCM_REQ_MB5_I2C_SLAVE (PRCM_REQ_MB5 + 0)
#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 1)
@@ -105,11 +269,12 @@ enum mb5_header {
#define PRCMU_RESET_DSIPLL 0x00004000
#define PRCMU_UNCLAMP_DSIPLL 0x00400800
-/* HDMI CLK MGT PLLSW=001 (PLLSOC0), PLLDIV=0x8, = 50 Mhz*/
-#define PRCMU_DSI_CLOCK_SETTING 0x00000128
+/* HDMI CLK MGT PLLSW=001 (PLLSOC0), PLLDIV=0xC, = 33.33 MHz */
+#define PRCMU_DSI_CLOCK_SETTING 0x0000012C
/* TVCLK_MGT PLLSW=001 (PLLSOC0) PLLDIV=0x13, = 19.05 MHz */
#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000135
-#define PRCMU_PLLDSI_FREQ_SETTING 0x00020121
+/* PRCM_PLLDSI_FREQ R=4, N=1, D= 0x65 */
+#define PRCMU_PLLDSI_FREQ_SETTING 0x00040165
#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000002
#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x03000201
#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00000101
@@ -125,13 +290,176 @@ enum mb5_header {
#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3
/*
+ * Wakeups/IRQs
+ */
+
+#define WAKEUP_BIT_RTC BIT(0)
+#define WAKEUP_BIT_RTT0 BIT(1)
+#define WAKEUP_BIT_RTT1 BIT(2)
+#define WAKEUP_BIT_CD_IRQ BIT(3)
+#define WAKEUP_BIT_SRP_TIM BIT(4)
+#define WAKEUP_BIT_APE_REQ BIT(5)
+#define WAKEUP_BIT_USB BIT(6)
+#define WAKEUP_BIT_ABB BIT(7)
+#define WAKEUP_BIT_LOW_POWER_AUDIO BIT(8)
+#define WAKEUP_BIT_TEMP_SENSOR_LOW BIT(9)
+#define WAKEUP_BIT_ARM BIT(10)
+#define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
+#define WAKEUP_BIT_TEMP_SENSOR_HIGH BIT(12)
+#define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
+#define WAKEUP_BIT_GPIO0 BIT(23)
+#define WAKEUP_BIT_GPIO1 BIT(24)
+#define WAKEUP_BIT_GPIO2 BIT(25)
+#define WAKEUP_BIT_GPIO3 BIT(26)
+#define WAKEUP_BIT_GPIO4 BIT(27)
+#define WAKEUP_BIT_GPIO5 BIT(28)
+#define WAKEUP_BIT_GPIO6 BIT(29)
+#define WAKEUP_BIT_GPIO7 BIT(30)
+#define WAKEUP_BIT_AC_REL_ACK BIT(30)
+
+/*
+ * This vector maps irq numbers to the bits in the bit field used in
+ * communication with the PRCMU firmware.
+ *
+ * The reason for having this is to keep the irq numbers contiguous even though
+ * the bits in the bit field are not. (The bits also have a tendency to move
+ * around, to further complicate matters.)
+ */
+#define IRQ_INDEX(_name) ((IRQ_DB5500_PRCMU_##_name) - IRQ_DB5500_PRCMU_BASE)
+#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
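+/*
+ * For illustration (assuming the usual mach-ux500 numbering, where
+ * IRQ_DB5500_PRCMU_RTC == IRQ_DB5500_PRCMU_BASE), IRQ_ENTRY(RTC)
+ * expands to [0] = WAKEUP_BIT_RTC, i.e. irq offset 0 is wired to
+ * wakeup bit 0.
+ */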
+static u32 prcmu_irq_bit[NUM_DB5500_PRCMU_WAKEUPS] = {
+ IRQ_ENTRY(RTC),
+ IRQ_ENTRY(RTT0),
+ IRQ_ENTRY(RTT1),
+ IRQ_ENTRY(CD_IRQ),
+ IRQ_ENTRY(SRP_TIM),
+ IRQ_ENTRY(APE_REQ),
+ IRQ_ENTRY(USB),
+ IRQ_ENTRY(ABB),
+ IRQ_ENTRY(LOW_POWER_AUDIO),
+ IRQ_ENTRY(TEMP_SENSOR_LOW),
+ IRQ_ENTRY(TEMP_SENSOR_HIGH),
+ IRQ_ENTRY(ARM),
+ IRQ_ENTRY(AC_WAKE_ACK),
+ IRQ_ENTRY(MODEM_SW_RESET_REQ),
+ IRQ_ENTRY(GPIO0),
+ IRQ_ENTRY(GPIO1),
+ IRQ_ENTRY(GPIO2),
+ IRQ_ENTRY(GPIO3),
+ IRQ_ENTRY(GPIO4),
+ IRQ_ENTRY(GPIO5),
+ IRQ_ENTRY(GPIO6),
+ IRQ_ENTRY(GPIO7),
+ IRQ_ENTRY(AC_REL_ACK),
+};
+
+#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
+#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
+static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
+ WAKEUP_ENTRY(RTC),
+ WAKEUP_ENTRY(RTT0),
+ WAKEUP_ENTRY(RTT1),
+ WAKEUP_ENTRY(CD_IRQ),
+ WAKEUP_ENTRY(USB),
+ WAKEUP_ENTRY(ABB),
+ WAKEUP_ENTRY(ARM)
+};
+
+/*
* mb0_transfer - state needed for mailbox 0 communication.
- * @lock: The transaction lock.
+ * @lock: The transaction lock.
+ * @dbb_irqs_lock: Lock used for (un)masking DBB wakeup interrupts.
+ * @mask_work: Work structure used for (un)masking wakeup interrupts.
+ * @ac_wake_lock: mutex to lock modem_req and modem_rel
+ * @req: Request data that need to persist between requests.
*/
static struct {
spinlock_t lock;
+ spinlock_t dbb_irqs_lock;
+ struct work_struct mask_work;
+ struct mutex ac_wake_lock;
+ struct {
+ u32 dbb_irqs;
+ u32 dbb_wakeups;
+ u32 abb_events;
+ } req;
} mb0_transfer;
+
+/*
+ * mb1_transfer - state needed for mailbox 1 communication.
+ * @lock: The transaction lock.
+ * @work: The transaction completion structure.
+ * @req_arm_opp: Requested ARM OPP.
+ * @req_ape_opp: Requested APE OPP.
+ * @ack: Reply ("acknowledge") data.
+ */
+static struct {
+ struct mutex lock;
+ struct completion work;
+ u8 req_arm_opp;
+ u8 req_ape_opp;
+ struct {
+ u8 header;
+ u8 arm_opp;
+ u8 ape_opp;
+ u8 arm_voltage_st;
+ u8 ape_voltage_st;
+ } ack;
+} mb1_transfer;
+
+/*
+ * mb2_transfer - state needed for mailbox 2 communication.
+ * @lock: The transaction lock.
+ * @work: The transaction completion structure.
+ * @req: Request data that need to persist between requests.
+ * @ack: Reply ("acknowledge") data.
+ */
+static struct {
+ struct mutex lock;
+ struct completion work;
+ struct {
+ u8 epod_st[DB5500_NUM_EPOD_ID];
+ u8 pll_st[DB5500_NUM_PLL_ID];
+ } req;
+ struct {
+ u8 header;
+ u8 status;
+ } ack;
+} mb2_transfer;
+
+/*
+ * mb3_transfer - state needed for mailbox 3 communication.
+ * @sysclk_lock: A lock used to handle concurrent sysclk requests.
+ * @sysclk_work: Work structure used for sysclk requests.
+ * @req_st: Requested clock state.
+ * @ack: Acknowledgement data
+ */
+static struct {
+ struct mutex sysclk_lock;
+ struct completion sysclk_work;
+ enum sysclk_state req_st;
+ struct {
+ u8 header;
+ u8 status;
+ } ack;
+} mb3_transfer;
+
+/*
+ * mb4_transfer - state needed for mailbox 4 communication.
+ * @lock: The transaction lock.
+ * @work: The transaction completion structure.
+ * @ack: Acknowledgement data
+ */
+static struct {
+ struct mutex lock;
+ struct completion work;
+ struct {
+ u8 header;
+ u8 status;
+ } ack;
+} mb4_transfer;
+
/*
* mb5_transfer - state needed for mailbox 5 communication.
* @lock: The transaction lock.
@@ -148,9 +476,825 @@ static struct {
} ack;
} mb5_transfer;
-/* PRCMU TCDM base IO address. */
+/* Spinlocks */
+static DEFINE_SPINLOCK(clkout_lock);
+
+/* PRCMU TCDM base IO address */
static __iomem void *tcdm_base;
+/* PRCMU MTIMER base IO address */
+static __iomem void *mtimer_base;
+
+struct clk_mgt {
+ unsigned int offset;
+ u32 pllsw;
+ u32 div;
+ bool scalable;
+ bool force50;
+};
+
+/* PRCMU Firmware Details */
+static struct {
+ u16 board;
+ u8 fw_version;
+ u8 api_version;
+} prcmu_version;
+
+static struct {
+ u32 timeout;
+ bool enabled;
+} a9wdog_timer;
+
+static DEFINE_SPINLOCK(clk_mgt_lock);
+
+#define CLK_MGT_ENTRY(_name, _scalable)[PRCMU_##_name] = { \
+ .offset = DB5500_PRCM_##_name##_MGT, \
+ .scalable = _scalable, \
+}
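+/*
+ * For example, CLK_MGT_ENTRY(SGACLK, true) expands to
+ * [PRCMU_SGACLK] = { .offset = DB5500_PRCM_SGACLK_MGT, .scalable = true },
+ * filling the corresponding slot in the clk_mgt[] table below.
+ */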
+
+static struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
+ CLK_MGT_ENTRY(SGACLK, true),
+ CLK_MGT_ENTRY(UARTCLK, false),
+ CLK_MGT_ENTRY(MSP02CLK, false),
+ CLK_MGT_ENTRY(I2CCLK, false),
+	[PRCMU_SDMMCCLK] = {
+		.offset = DB5500_PRCM_SDMMCCLK_MGT,
+		.force50 = true,
+		.scalable = false,
+	},
+	[PRCMU_SPARE1CLK] = {
+		.offset = DB5500_PRCM_SPARE1CLK_MGT,
+		.force50 = true,
+		.scalable = false,
+	},
+ CLK_MGT_ENTRY(PER1CLK, false),
+ CLK_MGT_ENTRY(PER2CLK, true),
+ CLK_MGT_ENTRY(PER3CLK, true),
+ CLK_MGT_ENTRY(PER5CLK, false), /* used for SPI */
+ CLK_MGT_ENTRY(PER6CLK, true),
+ CLK_MGT_ENTRY(PWMCLK, false),
+ CLK_MGT_ENTRY(IRDACLK, false),
+ CLK_MGT_ENTRY(IRRCCLK, false),
+ CLK_MGT_ENTRY(HDMICLK, false),
+ CLK_MGT_ENTRY(APEATCLK, false),
+ CLK_MGT_ENTRY(APETRACECLK, true),
+ CLK_MGT_ENTRY(MCDECLK, true),
+ CLK_MGT_ENTRY(DSIALTCLK, false),
+ CLK_MGT_ENTRY(DMACLK, true),
+ CLK_MGT_ENTRY(B2R2CLK, true),
+ CLK_MGT_ENTRY(TVCLK, false),
+ CLK_MGT_ENTRY(RNGCLK, false),
+ CLK_MGT_ENTRY(SIACLK, false),
+ CLK_MGT_ENTRY(SVACLK, false),
+ CLK_MGT_ENTRY(ACLK, true),
+};
+
+static atomic_t modem_req_state = ATOMIC_INIT(0);
+
+bool db5500_prcmu_is_modem_requested(void)
+{
+ return (atomic_read(&modem_req_state) != 0);
+}
+
+/**
+ * prcmu_modem_req - APE requests Modem to wake up
+ *
+ * Whenever the APE wants to send a message to the modem, it must call this
+ * function first to make sure that the modem is awake.
+ */
+void prcmu_modem_req(void)
+{
+ u32 val;
+
+ mutex_lock(&mb0_transfer.ac_wake_lock);
+
+ val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
+ if (val & PRCM_HOSTACCESS_REQ_BIT)
+ goto unlock_and_return;
+
+ writel((val | PRCM_HOSTACCESS_REQ_BIT),
+ (_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
+ atomic_set(&modem_req_state, 1);
+
+unlock_and_return:
+ mutex_unlock(&mb0_transfer.ac_wake_lock);
+}
+
+/**
+ * prcmu_modem_rel - APE has no more messages to send and hence releases modem.
+ *
+ * APE-to-modem communication is initiated by modem_req; once the
+ * communication is complete, the APE sends modem_rel to finish the protocol.
+ */
+void prcmu_modem_rel(void)
+{
+ u32 val;
+
+ mutex_lock(&mb0_transfer.ac_wake_lock);
+
+ val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
+ if (!(val & PRCM_HOSTACCESS_REQ_BIT))
+ goto unlock_and_return;
+
+ writel((val & ~PRCM_HOSTACCESS_REQ_BIT),
+ (_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
+
+ atomic_set(&modem_req_state, 0);
+
+unlock_and_return:
+ mutex_unlock(&mb0_transfer.ac_wake_lock);
+}
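+
+/*
+ * Illustrative use of the request/release protocol above (a sketch,
+ * not part of this patch): a hypothetical caller brackets its modem
+ * traffic like this:
+ *
+ *	prcmu_modem_req();
+ *	... send messages to the modem ...
+ *	prcmu_modem_rel();
+ */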
+
+/**
+ * prcmu_ape_ack - send an acknowledgement to the modem
+ *
+ * When the APE receives an ape_req, it must acknowledge the interrupt.
+ * This function sends the acknowledgement by writing to the PRCMU
+ * register, which triggers an interrupt to the modem.
+ */
+void prcmu_ape_ack(void)
+{
+ writel(PRCM_APE_ACK_BIT, (_PRCMU_BASE + PRCM_APE_ACK));
+}
+
+/**
+ * db5500_prcmu_modem_reset - Assert a reset on the modem
+ *
+ * This function asserts a reset request to the modem. Prior to that,
+ * PRCM_HOSTACCESS_REQ must be '0'.
+ */
+void db5500_prcmu_modem_reset(void)
+{
+ mutex_lock(&mb4_transfer.lock);
+
+ /* PRCM_HOSTACCESS_REQ = 0, before asserting a reset */
+ prcmu_modem_rel();
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writeb(MB4H_CGF_MODEM_RESET, PRCM_REQ_MB4_HEADER);
+ writel(MBOX_BIT(4), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+ wait_for_completion(&mb4_transfer.work);
+ if (mb4_transfer.ack.status != RC_SUCCESS ||
+	    mb4_transfer.ack.header != MB4H_ACK_CFG_MODEM_RESET)
+		printk(KERN_ERR
+			"ACK not received for modem reset interrupt\n");
+ mutex_unlock(&mb4_transfer.lock);
+}
+
+/**
+ * prcmu_config_clkout - Configure one of the programmable clock outputs.
+ * @clkout: The CLKOUT number (0 or 1).
+ * @source: Clock source.
+ * @div: The divider to be applied.
+ *
+ * Configures one of the programmable clock outputs (CLKOUTs).
+ */
+int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
+{
+ static bool configured[2] = {false, false};
+ int r = 0;
+ unsigned long flags;
+ u32 sel_val;
+ u32 div_val;
+ u32 sel_bits;
+ u32 div_bits;
+ u32 sel_mask;
+ u32 div_mask;
+ u8 sel0 = CLKOUT_SEL0_SEL_CLK;
+ u16 sel = 0;
+
+ BUG_ON(clkout > DB5500_CLKOUT1);
+ BUG_ON(source > DB5500_CLKOUT_IRDACLK);
+ BUG_ON(div > 7);
+
+ switch (source) {
+ case DB5500_CLKOUT_REF_CLK_SEL0:
+ sel0 = CLKOUT_SEL0_REF_CLK;
+ break;
+ case DB5500_CLKOUT_RTC_CLK0_SEL0:
+ sel0 = CLKOUT_SEL0_RTC_CLK0;
+ break;
+ case DB5500_CLKOUT_ULP_CLK_SEL0:
+ sel0 = CLKOUT_SEL0_ULP_CLK;
+ break;
+ case DB5500_CLKOUT_STATIC0:
+ sel = CLKOUT_SEL_STATIC0;
+ break;
+ case DB5500_CLKOUT_REFCLK:
+ sel = CLKOUT_SEL_REFCLK;
+ break;
+ case DB5500_CLKOUT_ULPCLK:
+ sel = CLKOUT_SEL_ULPCLK;
+ break;
+ case DB5500_CLKOUT_ARMCLK:
+ sel = CLKOUT_SEL_ARMCLK;
+ break;
+ case DB5500_CLKOUT_SYSACC0CLK:
+ sel = CLKOUT_SEL_SYSACC0CLK;
+ break;
+ case DB5500_CLKOUT_SOC0PLLCLK:
+ sel = CLKOUT_SEL_SOC0PLLCLK;
+ break;
+ case DB5500_CLKOUT_SOC1PLLCLK:
+ sel = CLKOUT_SEL_SOC1PLLCLK;
+ break;
+ case DB5500_CLKOUT_DDRPLLCLK:
+ sel = CLKOUT_SEL_DDRPLLCLK;
+ break;
+ case DB5500_CLKOUT_TVCLK:
+ sel = CLKOUT_SEL_TVCLK;
+ break;
+ case DB5500_CLKOUT_IRDACLK:
+ sel = CLKOUT_SEL_IRDACLK;
+ break;
+ }
+
+ switch (clkout) {
+ case DB5500_CLKOUT0:
+ sel_mask = PRCM_CLKOCR_CLKOUT0_SEL0_MASK |
+ PRCM_CLKOCR_CLKOUT0_SEL_MASK;
+ sel_bits = ((sel0 << PRCM_CLKOCR_CLKOUT0_SEL0_SHIFT) |
+ (sel << PRCM_CLKOCR_CLKOUT0_SEL_SHIFT));
+ div_mask = PRCM_CLKODIV_CLKOUT0_DIV_MASK;
+ div_bits = div << PRCM_CLKODIV_CLKOUT0_DIV_SHIFT;
+ break;
+ case DB5500_CLKOUT1:
+ sel_mask = PRCM_CLKOCR_CLKOUT1_SEL0_MASK |
+ PRCM_CLKOCR_CLKOUT1_SEL_MASK;
+ sel_bits = ((sel0 << PRCM_CLKOCR_CLKOUT1_SEL0_SHIFT) |
+ (sel << PRCM_CLKOCR_CLKOUT1_SEL_SHIFT));
+ div_mask = PRCM_CLKODIV_CLKOUT1_DIV_MASK;
+ div_bits = div << PRCM_CLKODIV_CLKOUT1_DIV_SHIFT;
+ break;
+ }
+
+ spin_lock_irqsave(&clkout_lock, flags);
+
+ if (configured[clkout]) {
+ r = -EINVAL;
+ goto unlock_and_return;
+ }
+
+ sel_val = readl(_PRCMU_BASE + PRCM_CLKOCR);
+ writel((sel_bits | (sel_val & ~sel_mask)),
+ (_PRCMU_BASE + PRCM_CLKOCR));
+
+ div_val = readl(_PRCMU_BASE + PRCM_CLKODIV);
+ writel((div_bits | (div_val & ~div_mask)),
+ (_PRCMU_BASE + PRCM_CLKODIV));
+
+ configured[clkout] = true;
+
+unlock_and_return:
+ spin_unlock_irqrestore(&clkout_lock, flags);
+
+ return r;
+}
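+
+/*
+ * Minimal usage sketch (illustrative only; the function lets each
+ * CLKOUT be configured just once):
+ *
+ *	int err = prcmu_config_clkout(DB5500_CLKOUT0,
+ *				      DB5500_CLKOUT_ULPCLK, CLKOUT_DIV_8);
+ */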
+
+static int request_sysclk(bool enable)
+{
+ int r;
+
+ r = 0;
+ mutex_lock(&mb3_transfer.sysclk_lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
+ cpu_relax();
+
+ if (enable)
+ mb3_transfer.req_st = SYSCLK_ON;
+ else
+ mb3_transfer.req_st = SYSCLK_OFF;
+
+ writeb(mb3_transfer.req_st, (PRCM_REQ_MB3_REFCLK_MGT));
+
+ writeb(MB3H_REFCLK_REQUEST, (PRCM_REQ_MB3_HEADER));
+ writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ /*
+ * The firmware only sends an ACK if we want to enable the
+ * SysClk, and it succeeds.
+ */
+ if (!wait_for_completion_timeout(&mb3_transfer.sysclk_work,
+ msecs_to_jiffies(20000))) {
+ pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
+ __func__);
+ r = -EIO;
+ WARN(1, "Failed to set sysclk");
+ goto unlock_and_return;
+ }
+
+ if ((mb3_transfer.ack.header != MB3H_REFCLK_REQUEST) ||
+ (mb3_transfer.ack.status != mb3_transfer.req_st)) {
+ r = -EIO;
+ }
+
+unlock_and_return:
+ mutex_unlock(&mb3_transfer.sysclk_lock);
+
+ return r;
+}
+
+static int request_timclk(bool enable)
+{
+ u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
+
+ if (!enable)
+ val |= PRCM_TCR_STOP_TIMERS;
+ writel(val, _PRCMU_BASE + PRCM_TCR);
+
+ return 0;
+}
+
+static int request_clk(u8 clock, bool enable)
+{
+ int r = 0;
+
+ BUG_ON(clock >= DB5500_NUM_CLK_CLIENTS);
+
+ mutex_lock(&mb2_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
+ cpu_relax();
+
+ /* fill in mailbox */
+ writeb(clock, PRCM_REQ_MB2_CLK_CLIENT);
+ writeb(enable, PRCM_REQ_MB2_CLK_STATE);
+
+ writeb(MB2H_CLK_REQUEST, PRCM_REQ_MB2_HEADER);
+
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+ if (!wait_for_completion_timeout(&mb2_transfer.work,
+ msecs_to_jiffies(20000))) {
+ pr_err("prcmu: request_clk() failed.\n");
+ r = -EIO;
+ WARN(1, "Failed in request_clk");
+ goto unlock_and_return;
+ }
+ if (mb2_transfer.ack.status != RC_SUCCESS ||
+ mb2_transfer.ack.header != MB2H_CLK_REQUEST)
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb2_transfer.lock);
+ return r;
+}
+
+static int request_reg_clock(u8 clock, bool enable)
+{
+ u32 val;
+ unsigned long flags;
+
+ WARN_ON(!clk_mgt[clock].offset);
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ /* Grab the HW semaphore. */
+ while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
+ if (enable) {
+ val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
+ } else {
+ clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
+ val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
+ }
+ writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
+
+ /* Release the HW semaphore. */
+ writel(0, _PRCMU_BASE + PRCM_SEM);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+
+ return 0;
+}
+
+/*
+ * request_pll() - Request for a pll to be enabled or disabled.
+ * @pll: The pll for which the request is made.
+ * @enable: Whether the clock should be enabled (true) or disabled (false).
+ *
+ * This function should only be used by the clock implementation.
+ * Do not use it from any other place!
+ */
+static int request_pll(u8 pll, bool enable)
+{
+ int r = 0;
+
+ BUG_ON(pll >= DB5500_NUM_PLL_ID);
+ mutex_lock(&mb2_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
+ cpu_relax();
+
+ mb2_transfer.req.pll_st[pll] = enable;
+
+ /* fill in mailbox */
+ writeb(pll, PRCM_REQ_MB2_PLL_CLIENT);
+ writeb(mb2_transfer.req.pll_st[pll], PRCM_REQ_MB2_PLL_STATE);
+
+ writeb(MB2H_PLL_REQUEST, PRCM_REQ_MB2_HEADER);
+
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+ if (!wait_for_completion_timeout(&mb2_transfer.work,
+ msecs_to_jiffies(20000))) {
+		pr_err("prcmu: request_pll() failed.\n");
+ r = -EIO;
+ WARN(1, "Failed to set pll");
+ goto unlock_and_return;
+ }
+ if (mb2_transfer.ack.status != RC_SUCCESS ||
+ mb2_transfer.ack.header != MB2H_PLL_REQUEST)
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb2_transfer.lock);
+
+ return r;
+}
+
+/**
+ * db5500_prcmu_request_clock() - Request for a clock to be enabled or disabled.
+ * @clock: The clock for which the request is made.
+ * @enable: Whether the clock should be enabled (true) or disabled (false).
+ *
+ * This function should only be used by the clock implementation.
+ * Do not use it from any other place!
+ */
+int db5500_prcmu_request_clock(u8 clock, bool enable)
+{
+	/* MSP1, CD and IRDA clocks are handled by FW */
+ if (clock == PRCMU_MSP1CLK)
+ return request_clk(DB5500_MSP1CLK, enable);
+ else if (clock == PRCMU_CDCLK)
+ return request_clk(DB5500_CDCLK, enable);
+ else if (clock == PRCMU_IRDACLK)
+ return request_clk(DB5500_IRDACLK, enable);
+ else if (clock < PRCMU_NUM_REG_CLOCKS)
+ return request_reg_clock(clock, enable);
+ else if (clock == PRCMU_TIMCLK)
+ return request_timclk(enable);
+ else if (clock == PRCMU_PLLSOC0)
+ return request_pll(DB5500_PLL_SOC0, enable);
+ else if (clock == PRCMU_PLLSOC1)
+ return request_pll(DB5500_PLL_SOC1, enable);
+ else if (clock == PRCMU_PLLDDR)
+ return request_pll(DB5500_PLL_DDR, enable);
+ else if (clock == PRCMU_SYSCLK)
+ return request_sysclk(enable);
+ else
+ return -EINVAL;
+}
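+
+/*
+ * Minimal usage sketch (illustrative, not part of this patch): enable
+ * a firmware-managed clock and disable it again when done:
+ *
+ *	if (db5500_prcmu_request_clock(PRCMU_MSP1CLK, true) == 0) {
+ *		... use the clock ...
+ *		db5500_prcmu_request_clock(PRCMU_MSP1CLK, false);
+ *	}
+ */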
+
+/* This function should only be called while mb0_transfer.lock is held. */
+static void config_wakeups(void)
+{
+ static u32 last_dbb_events;
+ static u32 last_abb_events;
+ u32 dbb_events;
+ u32 abb_events;
+
+ dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
+
+ abb_events = mb0_transfer.req.abb_events;
+
+ if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
+ return;
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ cpu_relax();
+
+ writel(dbb_events, PRCM_REQ_MB0_WAKEUP_DBB);
+ writel(abb_events, PRCM_REQ_MB0_WAKEUP_ABB);
+ writeb(MB0H_WAKE_UP_CFG, PRCM_REQ_MB0_HEADER);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ last_dbb_events = dbb_events;
+ last_abb_events = abb_events;
+}
+
+int db5500_prcmu_config_esram0_deep_sleep(u8 state)
+{
+ unsigned long flags;
+
+ if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
+ (state < ESRAM0_DEEP_SLEEP_STATE_OFF))
+ return -EINVAL;
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ if (state == ESRAM0_DEEP_SLEEP_STATE_RET)
+ writeb(RET_ST, PRCM_REQ_MB0_ESRAM0_STATE);
+ else
+ writeb(OFF_ST, PRCM_REQ_MB0_ESRAM0_STATE);
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+
+ return 0;
+}
+
+int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
+{
+ int r = 0;
+ unsigned long flags;
+
+ /* Deep Idle is not supported in DB5500 */
+ BUG_ON((state < PRCMU_AP_SLEEP) || (state >= PRCMU_AP_DEEP_IDLE));
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ cpu_relax();
+
+ switch (state) {
+ case PRCMU_AP_IDLE:
+ writeb(DB5500_AP_IDLE, PRCM_REQ_MB0_AP_POWER_STATE);
+ /* TODO: Can be high latency */
+ writeb(DDR_PWR_STATE_UNCHANGED, PRCM_REQ_MB0_DDR_STATE);
+ break;
+ case PRCMU_AP_SLEEP:
+ writeb(DB5500_AP_SLEEP, PRCM_REQ_MB0_AP_POWER_STATE);
+ break;
+ case PRCMU_AP_DEEP_SLEEP:
+ writeb(DB5500_AP_DEEP_SLEEP, PRCM_REQ_MB0_AP_POWER_STATE);
+ break;
+ default:
+ r = -EINVAL;
+ goto unlock_return;
+ }
+ writeb((keep_ap_pll ? 1 : 0), PRCM_REQ_MB0_AP_PLL_STATE);
+ writeb((keep_ulp_clk ? 1 : 0), PRCM_REQ_MB0_ULP_CLOCK_STATE);
+
+ writeb(MB0H_PWR_STATE_TRANS, PRCM_REQ_MB0_HEADER);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+unlock_return:
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+
+ return r;
+}
+
+u8 db5500_prcmu_get_power_state_result(void)
+{
+ u8 status = readb_relaxed(PRCM_ACK_MB0_AP_PWRSTTR_STATUS);
+
+ /*
+ * Callers expect all the status values to match 8500. Adjust for
+ * PendingReq_Er (0x2b).
+ */
+ if (status == 0x2b)
+ status = PRCMU_PRCMU2ARMPENDINGIT_ER;
+
+ return status;
+}
+
+void db5500_prcmu_enable_wakeups(u32 wakeups)
+{
+ unsigned long flags;
+ u32 bits;
+ int i;
+
+ BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));
+
+ for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
+ if (wakeups & BIT(i)) {
+ if (prcmu_wakeup_bit[i] == 0)
+ WARN(1, "WAKEUP NOT SUPPORTED");
+ else
+ bits |= prcmu_wakeup_bit[i];
+ }
+ }
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ mb0_transfer.req.dbb_wakeups = bits;
+ config_wakeups();
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+}
+
+void db5500_prcmu_config_abb_event_readout(u32 abb_events)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ mb0_transfer.req.abb_events = abb_events;
+ config_wakeups();
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+}
+
+void db5500_prcmu_get_abb_event_buffer(void __iomem **buf)
+{
+ if (readb(PRCM_ACK_MB0_READ_POINTER) & 1)
+ *buf = (PRCM_ACK_MB0_WAKEUP_1_ABB);
+ else
+ *buf = (PRCM_ACK_MB0_WAKEUP_0_ABB);
+}
+
+/* This function must be called with mb4_transfer.lock held. */
+static int mailbox4_request(u8 mb4_request, u8 ack_request)
+{
+ int ret = 0;
+
+ writeb(mb4_request, PRCM_REQ_MB4_HEADER);
+ writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+
+ if (!wait_for_completion_timeout(&mb4_transfer.work,
+ msecs_to_jiffies(20000))) {
+		pr_err("prcmu: MB4 request %d failed\n", mb4_request);
+ ret = -EIO;
+ WARN(1, "prcmu: failed mb4 request");
+ goto failed;
+ }
+
+ if (mb4_transfer.ack.header != ack_request ||
+ mb4_transfer.ack.status != RC_SUCCESS)
+ ret = -EIO;
+failed:
+ return ret;
+}
+
+int db5500_prcmu_get_hotdog(void)
+{
+ return readw(PRCM_SHARE_INFO_HOTDOG);
+}
+
+int db5500_prcmu_config_hotdog(u8 threshold)
+{
+ int r = 0;
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writew(threshold, PRCM_REQ_MB4_HOTDOG_THRESHOLD);
+ r = mailbox4_request(MB4H_CFG_HOTDOG, MB4H_ACK_CFG_HOTDOG);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return r;
+}
+
+int db5500_prcmu_config_hotmon(u8 low, u8 high)
+{
+ int r = 0;
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writew(low, PRCM_REQ_MB4_HOTMON_LOW);
+ writew(high, PRCM_REQ_MB4_HOTMON_HIGH);
+
+ r = mailbox4_request(MB4H_CFG_HOTMON, MB4H_ACK_CFG_HOTMON);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return r;
+}
+
+static int config_hot_period(u16 val)
+{
+ int r = 0;
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ writew(val, PRCM_REQ_MB4_HOT_PERIOD);
+ r = mailbox4_request(MB4H_CFG_HOTPERIOD, MB4H_ACK_CFG_HOTPERIOD);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return r;
+}
+
+/*
+ * The period is given in milliseconds.
+ */
+int db5500_prcmu_start_temp_sense(u16 period)
+{
+ if (period == 0xFFFF)
+ return -EINVAL;
+
+ return config_hot_period(period);
+}
+
+int db5500_prcmu_stop_temp_sense(void)
+{
+ return config_hot_period(0xFFFF);
+}
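+
+/*
+ * Illustrative sequence (hypothetical thermal code; 'low' and 'high'
+ * are made-up threshold values): start temperature sensing with a
+ * 500 ms period and stop it again:
+ *
+ *	db5500_prcmu_config_hotmon(low, high);
+ *	db5500_prcmu_start_temp_sense(500);
+ *	...
+ *	db5500_prcmu_stop_temp_sense();
+ */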
+
+static int prcmu_a9wdog(u8 req, u8 ack)
+{
+ int r = 0;
+
+ mutex_lock(&mb4_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
+ cpu_relax();
+
+ r = mailbox4_request(req, ack);
+
+ mutex_unlock(&mb4_transfer.lock);
+
+ return r;
+}
+
+static void prcmu_a9wdog_set_interrupt(bool enable)
+{
+ if (enable) {
+ writel(PRCM_TIMER0_IRQ_RTOS1_SET,
+ (mtimer_base + PRCM_TIMER0_IRQ_EN_SET_OFFSET));
+ } else {
+ writel(PRCM_TIMER0_IRQ_RTOS1_CLR,
+ (mtimer_base + PRCM_TIMER0_IRQ_EN_CLR_OFFSET));
+ }
+}
+
+static void prcmu_a9wdog_set_timeout(u32 timeout)
+{
+ u32 comp_timeout;
+
+ comp_timeout = readl(mtimer_base + PRCM_TIMER0_RTOS_COUNTER_OFFSET) +
+ timeout;
+ writel(comp_timeout, mtimer_base + PRCM_TIMER0_RTOS_COMP1_OFFSET);
+}
+
+int db5500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
+{
+ /*
+	 * The sleep auto-off feature is not supported. Resume and
+	 * suspend are handled by the watchdog driver.
+ */
+ return 0;
+}
+
+int db5500_prcmu_enable_a9wdog(u8 id)
+{
+ int r = 0;
+
+ if (a9wdog_timer.enabled)
+ return -EPERM;
+
+ prcmu_a9wdog_set_interrupt(true);
+
+ r = prcmu_a9wdog(MB4H_CGF_A9WDOG_EN_PREBARK,
+ MB4H_ACK_CGF_A9WDOG_EN_PREBARK);
+ if (!r)
+ a9wdog_timer.enabled = true;
+ else
+ prcmu_a9wdog_set_interrupt(false);
+
+ return r;
+}
+
+int db5500_prcmu_disable_a9wdog(u8 id)
+{
+ if (!a9wdog_timer.enabled)
+ return -EPERM;
+
+ prcmu_a9wdog_set_interrupt(false);
+
+ a9wdog_timer.enabled = false;
+
+ return prcmu_a9wdog(MB4H_CGF_A9WDOG_DIS,
+ MB4H_ACK_CGF_A9WDOG_DIS);
+}
+
+int db5500_prcmu_kick_a9wdog(u8 id)
+{
+ int r = 0;
+
+ if (a9wdog_timer.enabled)
+ prcmu_a9wdog_set_timeout(a9wdog_timer.timeout);
+ else
+ r = -EPERM;
+
+ return r;
+}
+
+int db5500_prcmu_load_a9wdog(u8 id, u32 timeout)
+{
+ if (a9wdog_timer.enabled)
+ return -EPERM;
+
+ prcmu_a9wdog_set_timeout(timeout);
+ a9wdog_timer.timeout = timeout;
+
+ return 0;
+}
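+
+/*
+ * Typical a9wdog call sequence, as a sketch (a hypothetical watchdog
+ * driver; error handling omitted):
+ *
+ *	db5500_prcmu_load_a9wdog(0, timeout);
+ *	db5500_prcmu_enable_a9wdog(0);
+ *	while (<watchdog active>)
+ *		db5500_prcmu_kick_a9wdog(0);
+ *	db5500_prcmu_disable_a9wdog(0);
+ */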
+
/**
* db5500_prcmu_abb_read() - Read register value(s) from the ABB.
* @slave: The I2C slave address.
@@ -170,14 +1314,14 @@ int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
mutex_lock(&mb5_transfer.lock);
- while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
cpu_relax();
writeb(slave, PRCM_REQ_MB5_I2C_SLAVE);
writeb(reg, PRCM_REQ_MB5_I2C_REG);
writeb(size, PRCM_REQ_MB5_I2C_SIZE);
writeb(MB5H_I2C_READ, PRCM_REQ_MB5_HEADER);
- writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
+ writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
wait_for_completion(&mb5_transfer.work);
r = 0;
@@ -211,7 +1355,7 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
mutex_lock(&mb5_transfer.lock);
- while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
cpu_relax();
writeb(slave, PRCM_REQ_MB5_I2C_SLAVE);
writeb(reg, PRCM_REQ_MB5_I2C_REG);
@@ -219,7 +1363,7 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
memcpy_toio(PRCM_REQ_MB5_I2C_DATA, value, size);
writeb(MB5H_I2C_WRITE, PRCM_REQ_MB5_HEADER);
- writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
+ writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
wait_for_completion(&mb5_transfer.work);
if ((mb5_transfer.ack.header == MB5H_I2C_WRITE) &&
@@ -233,42 +1377,385 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
return r;
}
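+
+/*
+ * Sketch (illustrative; 'slave' and 'reg' are hypothetical values):
+ * read one byte from an ABB register over the PRCMU I2C channel:
+ *
+ *	u8 val;
+ *	int err = db5500_prcmu_abb_read(slave, reg, &val, 1);
+ */
+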
+/**
+ * db5500_prcmu_set_arm_opp - set the appropriate ARM OPP
+ * @opp: The new ARM operating point to transition to
+ * Returns: 0 on success, non-zero on failure
+ *
+ * This function sets the operating point of the ARM.
+ */
+int db5500_prcmu_set_arm_opp(u8 opp)
+{
+ int r;
+ u8 db5500_opp;
+
+ r = 0;
+
+ switch (opp) {
+ case ARM_EXTCLK:
+ db5500_opp = DB5500_ARM_EXT_OPP;
+ break;
+ case ARM_50_OPP:
+ db5500_opp = DB5500_ARM_50_OPP;
+ break;
+ case ARM_100_OPP:
+ db5500_opp = DB5500_ARM_100_OPP;
+ break;
+ default:
+ pr_err("prcmu: %s() received wrong opp value: %d\n",
+ __func__, opp);
+ r = -EINVAL;
+ goto bailout;
+ }
+
+ mutex_lock(&mb1_transfer.lock);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ cpu_relax();
+
+ writeb(MB1H_ARM_OPP, PRCM_REQ_MB1_HEADER);
+
+ writeb(db5500_opp, PRCM_REQ_MB1_ARM_OPP);
+ writel(MBOX_BIT(1), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ if (!wait_for_completion_timeout(&mb1_transfer.work,
+ msecs_to_jiffies(20000))) {
+ r = -EIO;
+ WARN(1, "prcmu: failed to set arm opp");
+ goto unlock_and_return;
+ }
+
+ if (mb1_transfer.ack.header != MB1H_ARM_OPP ||
+ (mb1_transfer.ack.arm_opp != db5500_opp) ||
+ (mb1_transfer.ack.arm_voltage_st != RC_SUCCESS))
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb1_transfer.lock);
+bailout:
+ if (!r)
+ prcmu_debug_arm_opp_log(opp);
+ return r;
+}
+
+static void __init prcmu_ape_clocks_init(void)
+{
+ u8 opp = db5500_prcmu_get_ape_opp();
+ unsigned long flags;
+ int i;
+
+ WARN(opp != APE_100_OPP, "%s: Initial APE OPP (%u) not 100%%?\n",
+ __func__, opp);
+
+ for (i = 0; i < PRCMU_NUM_REG_CLOCKS; i++) {
+ struct clk_mgt *clkmgt = &clk_mgt[i];
+ u32 clkval;
+ u32 div;
+
+ if (!clkmgt->scalable && !clkmgt->force50)
+ continue;
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ clkval = readl(_PRCMU_BASE + clkmgt->offset);
+ div = clkval & PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ div >>= PRCM_CLK_MGT_CLKPLLDIV_SHIFT;
+
+ if (clkmgt->force50) {
+ div *= 2;
+
+ clkval &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ clkval |= div << PRCM_CLK_MGT_CLKPLLDIV_SHIFT;
+ writel(clkval, _PRCMU_BASE + clkmgt->offset);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+ continue;
+ }
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+
+ clkmgt->div = div;
+ if (!div)
+ pr_err("%s: scalable clock at offset %#x has zero divisor\n",
+ __func__, clkmgt->offset);
+ }
+}
+
+static void prcmu_ape_clocks_scale(u8 opp)
+{
+ unsigned long irqflags;
+ unsigned int i;
+ u32 clkval;
+
+ /*
+ * Note: calling printk() under the following lock can cause lock
+ * recursion via clk_enable() for the console UART!
+ */
+ spin_lock_irqsave(&clk_mgt_lock, irqflags);
+
+	/* Grab the HW semaphore. */
+ while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ for (i = 0; i < PRCMU_NUM_REG_CLOCKS; i++) {
+ u32 divval;
+
+ if (!clk_mgt[i].scalable)
+ continue;
+
+ clkval = readl(_PRCMU_BASE + clk_mgt[i].offset);
+ divval = clk_mgt[i].div;
+
+ pr_debug("PRCMU: reg %#x prev clk = 0x%x stored div = 0x%x\n",
+ clk_mgt[i].offset, clkval, divval);
+
+ if (opp == DB5500_APE_50_OPP)
+ divval *= 2;
+
+ clkval &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
+ clkval |= divval << PRCM_CLK_MGT_CLKPLLDIV_SHIFT;
+
+ pr_debug("PRCMU: wr 0x%x in reg 0x%x\n",
+ clkval, clk_mgt[i].offset);
+
+ writel(clkval, _PRCMU_BASE + clk_mgt[i].offset);
+ }
+
+	/* Release the HW semaphore. */
+ writel(0, (_PRCMU_BASE + PRCM_SEM));
+
+ spin_unlock_irqrestore(&clk_mgt_lock, irqflags);
+}
+
+/* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */
+static void request_even_slower_clocks(bool enable)
+{
+ void __iomem *clock_reg[] = {
+ (_PRCMU_BASE + DB5500_PRCM_ACLK_MGT),
+ (_PRCMU_BASE + DB5500_PRCM_DMACLK_MGT)
+ };
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ /* Grab the HW semaphore. */
+ while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ for (i = 0; i < ARRAY_SIZE(clock_reg); i++) {
+ u32 val;
+ u32 div;
+
+ val = readl(clock_reg[i]);
+ div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK);
+ if (enable) {
+ if ((div <= 1) || (div > 15)) {
+ pr_err("prcmu: Bad clock divider %d in %s\n",
+ div, __func__);
+ goto unlock_and_return;
+ }
+ div <<= 1;
+ } else {
+ if (div <= 2)
+ goto unlock_and_return;
+ div >>= 1;
+ }
+ val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) |
+ (div & PRCM_CLK_MGT_CLKPLLDIV_MASK));
+ writel(val, clock_reg[i]);
+ }
+
+unlock_and_return:
+ /* Release the HW semaphore. */
+ writel(0, _PRCMU_BASE + PRCM_SEM);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+}
+
+int db5500_prcmu_set_ape_opp(u8 opp)
+{
+ int ret = 0;
+	u8 db5500_opp;
+
+	if (opp == mb1_transfer.req_ape_opp)
+ return 0;
+
+ switch (opp) {
+ case APE_100_OPP:
+ db5500_opp = DB5500_APE_100_OPP;
+ break;
+ case APE_50_OPP:
+ case APE_50_PARTLY_25_OPP:
+ db5500_opp = DB5500_APE_50_OPP;
+ break;
+ default:
+ pr_err("prcmu: %s() received wrong opp value: %d\n",
+ __func__, opp);
+ ret = -EINVAL;
+ goto bailout;
+ }
+
+ mutex_lock(&mb1_transfer.lock);
+ if (mb1_transfer.req_ape_opp == APE_50_PARTLY_25_OPP)
+ request_even_slower_clocks(false);
+ if ((opp != APE_100_OPP) && (mb1_transfer.req_ape_opp != APE_100_OPP))
+ goto skip_message;
+
+ prcmu_ape_clocks_scale(db5500_opp);
+
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
+ cpu_relax();
+
+ writeb(MB1H_APE_OPP, PRCM_REQ_MB1_HEADER);
+ writeb(db5500_opp, PRCM_REQ_MB1_APE_OPP);
+ writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
+
+ if (!wait_for_completion_timeout(&mb1_transfer.work,
+ msecs_to_jiffies(20000))) {
+ ret = -EIO;
+ WARN(1, "prcmu: failed to set ape opp to %u", opp);
+ goto unlock_and_return;
+ }
+
+ if (mb1_transfer.ack.header != MB1H_APE_OPP ||
+ (mb1_transfer.ack.ape_opp != db5500_opp) ||
+	    (mb1_transfer.ack.ape_voltage_st != RC_SUCCESS))
+ ret = -EIO;
+
+skip_message:
+ if ((!ret && (opp == APE_50_PARTLY_25_OPP)) ||
+ (ret && (mb1_transfer.req_ape_opp == APE_50_PARTLY_25_OPP)))
+ request_even_slower_clocks(true);
+ if (!ret)
+ mb1_transfer.req_ape_opp = opp;
+unlock_and_return:
+ mutex_unlock(&mb1_transfer.lock);
+bailout:
+ return ret;
+}
+
+int db5500_prcmu_get_ape_opp(void)
+{
+ u8 opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP);
+
+ switch (opp) {
+ case DB5500_APE_100_OPP:
+ return APE_100_OPP;
+ case DB5500_APE_50_OPP:
+ return APE_50_OPP;
+ default:
+ pr_err("prcmu: %s() read unknown opp value: %d\n",
+ __func__, opp);
+ return APE_100_OPP;
+ }
+}
+
+int db5500_prcmu_get_ddr_opp(void)
+{
+ return readb(_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW);
+}
+
+int db5500_prcmu_set_ddr_opp(u8 opp)
+{
+ if (opp != DDR_100_OPP && opp != DDR_50_OPP)
+ return -EINVAL;
+
+ writeb(opp, _PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW);
+
+ return 0;
+}
+
+/**
+ * db5500_prcmu_get_arm_opp - get the current ARM OPP
+ *
+ * Returns: the current ARM OPP
+ */
+int db5500_prcmu_get_arm_opp(void)
+{
+ u8 opp = readb(PRCM_ACK_MB1_CURRENT_ARM_OPP);
+
+ switch (opp) {
+ case DB5500_ARM_EXT_OPP:
+ return ARM_EXTCLK;
+ case DB5500_ARM_50_OPP:
+ return ARM_50_OPP;
+ case DB5500_ARM_100_OPP:
+ return ARM_100_OPP;
+ default:
+ pr_err("prcmu: %s() read unknown opp value: %d\n",
+ __func__, opp);
+ return ARM_100_OPP;
+ }
+}
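+
+/*
+ * Sketch (illustrative): request the 50% ARM OPP and read it back:
+ *
+ *	if (db5500_prcmu_set_arm_opp(ARM_50_OPP) == 0)
+ *		WARN_ON(db5500_prcmu_get_arm_opp() != ARM_50_OPP);
+ */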
+
+int prcmu_resetout(u8 resoutn, u8 state)
+{
+ int offset;
+ int pin = -1;
+
+ offset = state > 0 ? PRCM_RESOUTN_SET_OFFSET : PRCM_RESOUTN_CLR_OFFSET;
+
+ switch (resoutn) {
+ case 0:
+ pin = PRCMU_RESOUTN0_PIN;
+ break;
+ case 1:
+ pin = PRCMU_RESOUTN1_PIN;
+ break;
+ case 2:
+		pin = PRCMU_RESOUTN2_PIN;
+		break;
+ default:
+ break;
+ }
+
+ if (pin > 0)
+ writel(pin, _PRCMU_BASE + offset);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
int db5500_prcmu_enable_dsipll(void)
{
int i;
+ int ret = 0;
/* Enable DSIPLL_RESETN resets */
- writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
+ writel(PRCMU_RESET_DSIPLL, _PRCMU_BASE + PRCM_APE_RESETN_CLR);
/* Unclamp DSIPLL in/out */
- writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
+ writel(PRCMU_UNCLAMP_DSIPLL, _PRCMU_BASE + PRCM_MMIP_LS_CLAMP_CLR);
/* Set DSI PLL FREQ */
- writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
+ writel(PRCMU_PLLDSI_FREQ_SETTING, _PRCMU_BASE + PRCM_PLLDSI_FREQ);
writel(PRCMU_DSI_PLLOUT_SEL_SETTING,
- PRCM_DSI_PLLOUT_SEL);
+ _PRCMU_BASE + PRCM_DSI_PLLOUT_SEL);
/* Enable Escape clocks */
- writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
+ writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, _PRCMU_BASE + PRCM_DSITVCLK_DIV);
/* Start DSI PLL */
- writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
+ writel(PRCMU_ENABLE_PLLDSI, _PRCMU_BASE + PRCM_PLLDSI_ENABLE);
/* Reset DSI PLL */
- writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
+ writel(PRCMU_DSI_RESET_SW, _PRCMU_BASE + PRCM_DSI_SW_RESET);
for (i = 0; i < 10; i++) {
- if ((readl(PRCM_PLLDSI_LOCKP) &
+ if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) &
PRCMU_PLLDSI_LOCKP_LOCKED) == PRCMU_PLLDSI_LOCKP_LOCKED)
break;
udelay(100);
}
+
+ if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) &
+ PRCMU_PLLDSI_LOCKP_LOCKED)
+ != PRCMU_PLLDSI_LOCKP_LOCKED)
+ ret = -EIO;
/* Release DSIPLL_RESETN */
- writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
- return 0;
+ writel(PRCMU_RESET_DSIPLL, _PRCMU_BASE + PRCM_APE_RESETN_SET);
+ return ret;
}
int db5500_prcmu_disable_dsipll(void)
{
/* Disable dsi pll */
- writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
+ writel(PRCMU_DISABLE_PLLDSI, _PRCMU_BASE + PRCM_PLLDSI_ENABLE);
/* Disable escapeclock */
- writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
+ writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, _PRCMU_BASE + PRCM_DSITVCLK_DIV);
return 0;
}
@@ -276,27 +1763,150 @@ int db5500_prcmu_set_display_clocks(void)
{
/* HDMI and TVCLK should be handled somewhere else */
/* PLLDIV=8, PLLSW=2, CLKEN=1 */
- writel(PRCMU_DSI_CLOCK_SETTING, PRCM_HDMICLK_MGT);
+ writel(PRCMU_DSI_CLOCK_SETTING, _PRCMU_BASE + DB5500_PRCM_HDMICLK_MGT);
/* PLLDIV=14, PLLSW=2, CLKEN=1 */
- writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
+ writel(PRCMU_DSI_LP_CLOCK_SETTING, _PRCMU_BASE + DB5500_PRCM_TVCLK_MGT);
return 0;
}
+u32 db5500_prcmu_read(unsigned int reg)
+{
+ return readl_relaxed(_PRCMU_BASE + reg);
+}
+
+void db5500_prcmu_write(unsigned int reg, u32 value)
+{
+ writel_relaxed(value, _PRCMU_BASE + reg);
+}
+
+void db5500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(_PRCMU_BASE + reg);
+ val = (val & ~mask) | (value & mask);
+ writel_relaxed(val, _PRCMU_BASE + reg);
+}
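+
+/*
+ * Example (illustrative): set only the CLKEN bit of a clock management
+ * register, leaving the divider and mux fields untouched:
+ *
+ *	db5500_prcmu_write_masked(DB5500_PRCM_SGACLK_MGT,
+ *				  PRCM_CLK_MGT_CLKEN, PRCM_CLK_MGT_CLKEN);
+ */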
+
+/**
+ * db5500_prcmu_system_reset - System reset
+ *
+ * Saves the reset reason code and then sets the APE_SOFTRST register,
+ * which fires an interrupt to the firmware.
+ */
+void db5500_prcmu_system_reset(u16 reset_code)
+{
+ writew(reset_code, PRCM_SW_RST_REASON);
+ writel(1, _PRCMU_BASE + PRCM_APE_SOFTRST);
+}
+
+/**
+ * db5500_prcmu_get_reset_code - Retrieve SW reset reason code
+ *
+ * Retrieves the reset reason code stored by prcmu_system_reset() before
+ * last restart.
+ */
+u16 db5500_prcmu_get_reset_code(void)
+{
+ return readw(PRCM_SW_RST_REASON);
+}
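+
+/*
+ * Illustrative pairing of the two helpers above (0xBEEF is an
+ * arbitrary, made-up reason code):
+ *
+ *	db5500_prcmu_system_reset(0xBEEF);	... reboots via the fw ...
+ *	... after restart ...
+ *	u16 code = db5500_prcmu_get_reset_code();
+ */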
+
static void ack_dbb_wakeup(void)
{
unsigned long flags;
spin_lock_irqsave(&mb0_transfer.lock, flags);
- while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
cpu_relax();
- writeb(RMB0H_RD_WAKE_UP_ACK, PRCM_REQ_MB0_HEADER);
- writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
+ writeb(MB0H_RD_WAKE_UP_ACK, PRCM_REQ_MB0_HEADER);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
+int db5500_prcmu_set_epod(u16 epod, u8 epod_state)
+{
+ int r = 0;
+ bool ram_retention = false;
+
+ /* check argument */
+ BUG_ON(epod < DB5500_EPOD_ID_BASE);
+ BUG_ON(epod_state > EPOD_STATE_ON);
+ BUG_ON((epod - DB5500_EPOD_ID_BASE) >= DB5500_NUM_EPOD_ID);
+
+ if (epod == DB5500_EPOD_ID_ESRAM12)
+ ram_retention = true;
+
+ /* check argument */
+ BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);
+
+ /* get lock */
+ mutex_lock(&mb2_transfer.lock);
+
+ /* wait for mailbox */
+ while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
+ cpu_relax();
+
+ /* Retention is allowed only for ESRAM12 */
+ if (epod == DB5500_EPOD_ID_ESRAM12) {
+ switch (epod_state) {
+ case EPOD_STATE_ON:
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OOR_ON;
+ break;
+ case EPOD_STATE_OFF:
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OOR_OFF;
+ break;
+ case EPOD_STATE_RAMRET:
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OOR_RET;
+ break;
+ default:
+ r = -EINVAL;
+ goto unlock_and_return;
+ }
+ } else {
+ if (epod_state == EPOD_STATE_ON)
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_ON;
+ else if (epod_state == EPOD_STATE_OFF)
+ mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] =
+ EPOD_OFF;
+ else {
+ r = -EINVAL;
+ goto unlock_and_return;
+ }
+ }
+ /* fill in mailbox */
+ writeb((epod - DB5500_EPOD_ID_BASE), PRCM_REQ_MB2_EPOD_CLIENT);
+ writeb(mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE],
+ PRCM_REQ_MB2_EPOD_STATE);
+
+ writeb(MB2H_EPOD_REQUEST, PRCM_REQ_MB2_HEADER);
+
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET);
+
+ if (!wait_for_completion_timeout(&mb2_transfer.work,
+ msecs_to_jiffies(20000))) {
+ pr_err("prcmu: set_epod() failed.\n");
+ r = -EIO;
+ WARN(1, "Failed to set epod");
+ goto unlock_and_return;
+ }
+
+ if (mb2_transfer.ack.status != RC_SUCCESS ||
+ mb2_transfer.ack.header != MB2H_EPOD_REQUEST)
+ r = -EIO;
+
+unlock_and_return:
+ mutex_unlock(&mb2_transfer.lock);
+ return r;
+}
+
static inline void print_unknown_header_warning(u8 n, u8 header)
{
pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
@@ -306,11 +1916,31 @@ static inline void print_unknown_header_warning(u8 n, u8 header)
static bool read_mailbox_0(void)
{
bool r;
+ u32 ev;
+ unsigned int n;
+
u8 header;
header = readb(PRCM_ACK_MB0_HEADER);
switch (header) {
- case AMB0H_WAKE_UP:
+ case MB0H_WAKE_UP:
+ if (readb(PRCM_ACK_MB0_READ_POINTER) & 1)
+ ev = readl(PRCM_ACK_MB0_WAKEUP_1_DBB);
+ else
+ ev = readl(PRCM_ACK_MB0_WAKEUP_0_DBB);
+
+ prcmu_debug_register_mbox0_event(ev,
+ (mb0_transfer.req.dbb_irqs |
+ mb0_transfer.req.dbb_wakeups));
+
+ ev &= mb0_transfer.req.dbb_irqs;
+
+ for (n = 0; n < NUM_DB5500_PRCMU_WAKEUPS; n++) {
+ if (ev & prcmu_irq_bit[n]) {
+ if (n != IRQ_INDEX(ABB))
+ generic_handle_irq(IRQ_DB5500_PRCMU_BASE + n);
+ }
+ }
r = true;
break;
default:
@@ -318,31 +1948,123 @@ static bool read_mailbox_0(void)
r = false;
break;
}
- writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return r;
}
static bool read_mailbox_1(void)
{
- writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR);
+ u8 header;
+ bool do_complete = true;
+
+ header = mb1_transfer.ack.header = readb(PRCM_ACK_MB1_HEADER);
+
+ switch (header) {
+ case MB1H_ARM_OPP:
+ mb1_transfer.ack.arm_opp = readb(PRCM_ACK_MB1_CURRENT_ARM_OPP);
+ mb1_transfer.ack.arm_voltage_st =
+ readb(PRCM_ACK_MB1_ARM_VOLT_STATUS);
+ break;
+ case MB1H_APE_OPP:
+ mb1_transfer.ack.ape_opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP);
+ mb1_transfer.ack.ape_voltage_st =
+ readb(PRCM_ACK_MB1_APE_VOLT_STATUS);
+ break;
+ case MB1H_ARM_APE_OPP:
+ mb1_transfer.ack.ape_opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP);
+ mb1_transfer.ack.ape_voltage_st =
+ readb(PRCM_ACK_MB1_APE_VOLT_STATUS);
+ break;
+ default:
+ print_unknown_header_warning(1, header);
+ do_complete = false;
+ break;
+ }
+
+ writel(MBOX_BIT(1), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+
+ if (do_complete)
+ complete(&mb1_transfer.work);
+
return false;
}
static bool read_mailbox_2(void)
{
- writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR);
+ u8 header;
+
+ header = readb(PRCM_ACK_MB2_HEADER);
+ mb2_transfer.ack.header = header;
+ switch (header) {
+ case MB2H_EPOD_REQUEST:
+ mb2_transfer.ack.status = readb(PRCM_ACK_MB2_EPOD_STATUS);
+ break;
+ case MB2H_CLK_REQUEST:
+ mb2_transfer.ack.status = readb(PRCM_ACK_MB2_CLK_STATUS);
+ break;
+ case MB2H_PLL_REQUEST:
+ mb2_transfer.ack.status = readb(PRCM_ACK_MB2_PLL_STATUS);
+ break;
+ default:
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+		pr_err("prcmu: Wrong ACK received for MB2 request\n");
+		return false;
+ }
+ writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+ complete(&mb2_transfer.work);
return false;
}
static bool read_mailbox_3(void)
{
- writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR);
+ u8 header;
+
+ header = readb(PRCM_ACK_MB3_HEADER);
+ mb3_transfer.ack.header = header;
+ switch (header) {
+ case MB3H_REFCLK_REQUEST:
+ mb3_transfer.ack.status = readb(PRCM_ACK_MB3_REFCLK_REQ);
+ writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+ complete(&mb3_transfer.sysclk_work);
+ break;
+ default:
+ writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
+ pr_err("prcmu: wrong MB3 header\n");
+ break;
+ }
+
return false;
}
static bool read_mailbox_4(void)
{
- writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR);
+ u8 header;
+ bool do_complete = true;
+
+ header = readb(PRCM_ACK_MB4_HEADER);
+ mb4_transfer.ack.header = header;
+ switch (header) {
+ case MB4H_ACK_CFG_HOTDOG:
+ case MB4H_ACK_CFG_HOTMON:
+ case MB4H_ACK_CFG_HOTPERIOD:
+ case MB4H_ACK_CFG_MODEM_RESET:
+ case MB4H_ACK_CGF_A9WDOG_EN_PREBARK:
+ case MB4H_ACK_CGF_A9WDOG_EN_NOPREBARK:
+ case MB4H_ACK_CGF_A9WDOG_DIS:
+ mb4_transfer.ack.status = readb(PRCM_ACK_MB4_REQUESTS);
+ break;
+ default:
+ print_unknown_header_warning(4, header);
+ do_complete = false;
+ break;
+ }
+
+ writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_ARM_IT1_CLEAR));
+
+ if (do_complete)
+ complete(&mb4_transfer.work);
+
return false;
}
@@ -363,19 +2085,19 @@ static bool read_mailbox_5(void)
print_unknown_header_warning(5, header);
break;
}
- writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return false;
}
static bool read_mailbox_6(void)
{
- writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(6), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return false;
}
static bool read_mailbox_7(void)
{
- writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR);
+ writel(MBOX_BIT(7), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
return false;
}
@@ -396,7 +2118,7 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data)
u8 n;
irqreturn_t r;
- bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
+ bits = (readl(_PRCMU_BASE + PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
if (unlikely(!bits))
return IRQ_NONE;
@@ -406,6 +2128,7 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data)
bits -= MBOX_BIT(n);
if (read_mailbox[n]())
r = IRQ_WAKE_THREAD;
+ prcmu_debug_register_interrupt(n);
}
}
return r;
@@ -413,39 +2136,271 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data)
static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
{
+ u32 ev;
+
+ /*
+ * ABB needs to be handled before the wakeup because
+ * the ping/pong buffers for ABB events could change
+ * after we acknowledge the wakeup.
+ */
+ if (readb(PRCM_ACK_MB0_READ_POINTER) & 1)
+ ev = readl(PRCM_ACK_MB0_WAKEUP_1_DBB);
+ else
+ ev = readl(PRCM_ACK_MB0_WAKEUP_0_DBB);
+
+ ev &= mb0_transfer.req.dbb_irqs;
+ if (ev & WAKEUP_BIT_ABB)
+ handle_nested_irq(IRQ_DB5500_PRCMU_ABB);
+
ack_dbb_wakeup();
+
return IRQ_HANDLED;
}
+static void prcmu_mask_work(struct work_struct *work)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.lock, flags);
+
+ config_wakeups();
+
+ spin_unlock_irqrestore(&mb0_transfer.lock, flags);
+}
+
+static void prcmu_irq_mask(struct irq_data *d)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
+
+ mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_DB5500_PRCMU_BASE];
+
+ spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
+ schedule_work(&mb0_transfer.mask_work);
+}
+
+static void prcmu_irq_unmask(struct irq_data *d)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
+
+ mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->irq - IRQ_DB5500_PRCMU_BASE];
+
+ spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
+ schedule_work(&mb0_transfer.mask_work);
+}
+
+static void noop(struct irq_data *d)
+{
+}
+
+static struct irq_chip prcmu_irq_chip = {
+ .name = "prcmu",
+ .irq_disable = prcmu_irq_mask,
+ .irq_ack = noop,
+ .irq_mask = prcmu_irq_mask,
+ .irq_unmask = prcmu_irq_unmask,
+};
+
void __init db5500_prcmu_early_init(void)
{
+ unsigned int i;
+	void __iomem *tcpm_base = ioremap_nocache(U5500_PRCMU_TCPM_BASE, SZ_4K);
+
+ if (tcpm_base != NULL) {
+ int version_high, version_low;
+
+ version_high = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
+ version_low = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET + 4);
+ prcmu_version.board = (version_high >> 24) & 0xFF;
+ prcmu_version.fw_version = version_high & 0xFF;
+ prcmu_version.api_version = version_low & 0xFF;
+
+ pr_info("PRCMU Firmware Version: 0x%x\n",
+ prcmu_version.fw_version);
+ pr_info("PRCMU API Version: 0x%x\n",
+ prcmu_version.api_version);
+
+ iounmap(tcpm_base);
+ }
+
tcdm_base = __io_address(U5500_PRCMU_TCDM_BASE);
+ mtimer_base = __io_address(U5500_MTIMER_BASE);
spin_lock_init(&mb0_transfer.lock);
+ spin_lock_init(&mb0_transfer.dbb_irqs_lock);
+ mutex_init(&mb0_transfer.ac_wake_lock);
+ mutex_init(&mb1_transfer.lock);
+ init_completion(&mb1_transfer.work);
+ mutex_init(&mb2_transfer.lock);
+ init_completion(&mb2_transfer.work);
+ mutex_init(&mb3_transfer.sysclk_lock);
+ init_completion(&mb3_transfer.sysclk_work);
+ mutex_init(&mb4_transfer.lock);
+ init_completion(&mb4_transfer.work);
mutex_init(&mb5_transfer.lock);
init_completion(&mb5_transfer.work);
+
+ INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
+
+	/* Initialize IRQs. */
+ for (i = 0; i < NUM_DB5500_PRCMU_WAKEUPS; i++) {
+ unsigned int irq;
+
+ irq = IRQ_DB5500_PRCMU_BASE + i;
+ irq_set_chip_and_handler(irq, &prcmu_irq_chip,
+ handle_simple_irq);
+ if (irq == IRQ_DB5500_PRCMU_ABB)
+ irq_set_nested_thread(irq, true);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ prcmu_ape_clocks_init();
+}
+
+/*
+ * Power domain switches (ePODs) modeled as regulators for the DB5500 SoC
+ */
+static struct regulator_consumer_supply db5500_vape_consumers[] = {
+ REGULATOR_SUPPLY("v-ape", NULL),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
+ REGULATOR_SUPPLY("vcore", "sdi0"),
+ REGULATOR_SUPPLY("vcore", "sdi1"),
+ REGULATOR_SUPPLY("vcore", "sdi2"),
+ REGULATOR_SUPPLY("vcore", "sdi3"),
+ REGULATOR_SUPPLY("vcore", "sdi4"),
+ REGULATOR_SUPPLY("v-uart", "uart0"),
+ REGULATOR_SUPPLY("v-uart", "uart1"),
+ REGULATOR_SUPPLY("v-uart", "uart2"),
+ REGULATOR_SUPPLY("v-uart", "uart3"),
+ REGULATOR_SUPPLY("v-ape", "db5500-keypad"),
+};
+
+static struct regulator_consumer_supply db5500_sga_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.0"),
+ REGULATOR_SUPPLY("v-mali", NULL),
+};
+
+static struct regulator_consumer_supply db5500_hva_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.1"),
+ REGULATOR_SUPPLY("v-hva", NULL),
+};
+
+static struct regulator_consumer_supply db5500_sia_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.2"),
+ REGULATOR_SUPPLY("v-sia", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply db5500_disp_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.3"),
+ REGULATOR_SUPPLY("vsupply", "b2r2_bus"),
+ REGULATOR_SUPPLY("vsupply", "mcde"),
+};
+
+static struct regulator_consumer_supply db5500_esram12_consumers[] = {
+ REGULATOR_SUPPLY("debug", "reg-virt-consumer.4"),
+ REGULATOR_SUPPLY("v-esram12", "mcde"),
+ REGULATOR_SUPPLY("esram12", "hva"),
+};
+
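+/*
+ * Helper macros for the ePOD switch entries below; the _VAPE variant
+ * additionally makes the switch a child of the VAPE domain via
+ * supply_regulator.
+ */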
+#define DB5500_REGULATOR_SWITCH(lower, upper) \
+[DB5500_REGULATOR_SWITCH_##upper] = { \
+ .constraints = { \
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \
+ }, \
+ .consumer_supplies = db5500_##lower##_consumers, \
+ .num_consumer_supplies = ARRAY_SIZE(db5500_##lower##_consumers),\
}
+#define DB5500_REGULATOR_SWITCH_VAPE(lower, upper) \
+[DB5500_REGULATOR_SWITCH_##upper] = { \
+ .supply_regulator = "db5500-vape", \
+ .constraints = { \
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \
+ }, \
+ .consumer_supplies = db5500_##lower##_consumers, \
+ .num_consumer_supplies = ARRAY_SIZE(db5500_##lower##_consumers),\
+}
+
+static struct regulator_init_data db5500_regulators[DB5500_NUM_REGULATORS] = {
+ [DB5500_REGULATOR_VAPE] = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = db5500_vape_consumers,
+ .num_consumer_supplies = ARRAY_SIZE(db5500_vape_consumers),
+ },
+ DB5500_REGULATOR_SWITCH_VAPE(sga, SGA),
+ DB5500_REGULATOR_SWITCH_VAPE(hva, HVA),
+ DB5500_REGULATOR_SWITCH_VAPE(sia, SIA),
+ DB5500_REGULATOR_SWITCH_VAPE(disp, DISP),
+ /*
+ * ESRAM12 is put in retention by the firmware when VAPE is
+ * turned off so there's no need to hold VAPE.
+ */
+ DB5500_REGULATOR_SWITCH(esram12, ESRAM12),
+};
+
+static struct mfd_cell db5500_prcmu_devs[] = {
+ {
+ .name = "db5500-prcmu-regulators",
+ .platform_data = &db5500_regulators,
+ .pdata_size = sizeof(db5500_regulators),
+ },
+ {
+ .name = "cpufreq-u5500",
+ },
+};
+
/**
* prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
*
*/
-int __init db5500_prcmu_init(void)
+static int __init db5500_prcmu_probe(struct platform_device *pdev)
{
- int r = 0;
+ int err = 0;
if (ux500_is_svp() || !cpu_is_u5500())
return -ENODEV;
/* Clean up the mailbox interrupts after pre-kernel code. */
- writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
+ writel(ALL_MBOX_BITS, _PRCMU_BASE + PRCM_ARM_IT1_CLEAR);
- r = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler,
- prcmu_irq_thread_fn, 0, "prcmu", NULL);
- if (r < 0) {
+ err = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler,
+ prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
+ if (err < 0) {
pr_err("prcmu: Failed to allocate IRQ_DB5500_PRCMU1.\n");
- return -EBUSY;
+ err = -EBUSY;
+ goto no_irq_return;
}
- return 0;
+
+ err = mfd_add_devices(&pdev->dev, 0, db5500_prcmu_devs,
+ ARRAY_SIZE(db5500_prcmu_devs), NULL,
+ 0);
+
+ if (err)
+ pr_err("prcmu: Failed to add subdevices\n");
+ else
+ pr_info("DB5500 PRCMU initialized\n");
+
+no_irq_return:
+ return err;
+}
+
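+/*
+ * The probe routine is __init, so it is passed to platform_driver_probe()
+ * below instead of being kept in the driver structure.
+ */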
+static struct platform_driver db5500_prcmu_driver = {
+ .driver = {
+ .name = "db5500-prcmu",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init db5500_prcmu_init(void)
+{
+ return platform_driver_probe(&db5500_prcmu_driver, db5500_prcmu_probe);
}
arch_initcall(db5500_prcmu_init);
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5be32489714..7c26c41a7ef 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -30,11 +30,13 @@
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/machine.h>
+#include <linux/mfd/abx500.h>
#include <asm/hardware/gic.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/db8500-regs.h>
#include <mach/id.h>
+#include <mach/prcmu-debug.h>
#include "dbx500-prcmu-regs.h"
/* Offset for the firmware version within the TCPM */
@@ -70,6 +72,8 @@
#define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */
+#define PRCM_TCDM_VOICE_CALL_FLAG 0xDD4 /* 4 bytes */
+
#define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */
#define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0)
#define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1)
@@ -214,10 +218,8 @@
#define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1)
#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2)
#define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3)
-#define PRCMU_I2C_WRITE(slave) \
- (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
-#define PRCMU_I2C_READ(slave) \
- (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0))
+#define PRCMU_I2C_WRITE(slave) (((slave) << 1) | BIT(6))
+#define PRCMU_I2C_READ(slave) (((slave) << 1) | BIT(0) | BIT(6))
#define PRCMU_I2C_STOP_EN BIT(3)
/* Mailbox 5 ACKs */
@@ -424,6 +426,13 @@ static DEFINE_SPINLOCK(clkout_lock);
/* Global var to runtime determine TCDM base for v2 or v1 */
static __iomem void *tcdm_base;
+/*
+ * Copies of the startup values of the reset status register and the SW reset
+ * code.
+ */
+static u32 reset_status_copy;
+static u16 reset_code_copy;
+
struct clk_mgt {
void __iomem *reg;
u32 pllsw;
@@ -637,6 +646,26 @@ void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
spin_unlock_irqrestore(&prcmu_lock, flags);
}
+/*
+ * Dump AB8500 registers, PRCMU registers and PRCMU data memory
+ * on critical errors.
+ */
+static void db8500_prcmu_debug_dump(const char *func,
+ bool dump_prcmu, bool dump_abb)
+{
+ printk(KERN_DEBUG "%s: timeout\n", func);
+
+ /* Dump AB8500 registers */
+ if (dump_abb)
+ abx500_dump_all_banks();
+
+ /* Dump prcmu registers and data memory */
+ if (dump_prcmu) {
+ prcmu_debug_dump_regs();
+ prcmu_debug_dump_data_mem();
+ }
+}
+
struct prcmu_fw_version *prcmu_get_fw_version(void)
{
return fw_info.valid ? &fw_info.version : NULL;
@@ -648,6 +677,11 @@ bool prcmu_has_arm_maxopp(void)
PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
}
+void db8500_prcmu_vc(bool enable)
+{
+ writel((enable ? 0xF : 0), (tcdm_base + PRCM_TCDM_VOICE_CALL_FLAG));
+}
+
/**
* prcmu_get_boot_status - PRCMU boot status checking
* Returns: the current PRCMU boot status
@@ -1049,7 +1083,7 @@ int db8500_prcmu_set_ddr_opp(u8 opp)
if (opp < DDR_100_OPP || opp > DDR_25_OPP)
return -EINVAL;
/* Changing the DDR OPP can hang the hardware pre-v21 */
- if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
+ if (!cpu_is_u8500v20())
writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW);
return 0;
@@ -1111,12 +1145,14 @@ unlock_and_return:
int db8500_prcmu_set_ape_opp(u8 opp)
{
int r = 0;
+ u8 prcmu_opp_req;
if (opp == mb1_transfer.ape_opp)
return 0;
mutex_lock(&mb1_transfer.lock);
+ /* Exit APE_50_PARTLY_25_OPP */
if (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)
request_even_slower_clocks(false);
@@ -1126,20 +1162,22 @@ int db8500_prcmu_set_ape_opp(u8 opp)
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
+ prcmu_opp_req = (opp == APE_50_PARTLY_25_OPP) ? APE_50_OPP : opp;
+
writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
- writeb(((opp == APE_50_PARTLY_25_OPP) ? APE_50_OPP : opp),
- (tcdm_base + PRCM_REQ_MB1_APE_OPP));
+ writeb(prcmu_opp_req, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
- (mb1_transfer.ack.ape_opp != opp))
+ (mb1_transfer.ack.ape_opp != prcmu_opp_req))
r = -EIO;
skip_message:
if ((!r && (opp == APE_50_PARTLY_25_OPP)) ||
+ /* Set APE_50_PARTLY_25_OPP back in case new opp failed */
(r && (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)))
request_even_slower_clocks(true);
if (!r)
@@ -1322,6 +1360,7 @@ int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state)
pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
__func__);
r = -EIO;
+ db8500_prcmu_debug_dump(__func__, true, true);
goto unlock_and_return;
}
@@ -1416,6 +1455,7 @@ static int request_sysclk(bool enable)
pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
__func__);
r = -EIO;
+ db8500_prcmu_debug_dump(__func__, true, true);
}
mutex_unlock(&mb3_transfer.sysclk_lock);
@@ -2190,6 +2230,7 @@ int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
__func__);
r = -EIO;
+ db8500_prcmu_debug_dump(__func__, true, false);
} else {
r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
}
@@ -2240,6 +2281,7 @@ int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, u8 size)
pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
__func__);
r = -EIO;
+ db8500_prcmu_debug_dump(__func__, true, false);
} else {
r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
}
@@ -2287,6 +2329,7 @@ retry:
if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
msecs_to_jiffies(5000))) {
+ db8500_prcmu_debug_dump(__func__, true, true);
pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n",
__func__);
goto unlock_and_return;
@@ -2309,6 +2352,7 @@ retry:
if (wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
msecs_to_jiffies(5000)))
goto retry;
+ db8500_prcmu_debug_dump(__func__, true, true);
pr_crit("prcmu: %s timed out (5 s) waiting for AC_SLEEP_ACK.\n",
__func__);
}
@@ -2335,6 +2379,7 @@ void prcmu_ac_sleep_req()
if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
msecs_to_jiffies(5000))) {
+ db8500_prcmu_debug_dump(__func__, true, true);
pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n",
__func__);
}
@@ -2370,7 +2415,17 @@ void db8500_prcmu_system_reset(u16 reset_code)
*/
u16 db8500_prcmu_get_reset_code(void)
{
- return readw(tcdm_base + PRCM_SW_RST_REASON);
+ return reset_code_copy;
+}
+
+/**
+ * db8500_prcmu_get_reset_status - Retrieve reset status
+ *
+ * Retrieves the value of the reset status register as read at startup.
+ */
+u32 db8500_prcmu_get_reset_status(void)
+{
+ return reset_status_copy;
}
/**
@@ -2437,6 +2492,13 @@ static bool read_mailbox_0(void)
if (ev & WAKEUP_BIT_SYSCLK_OK)
complete(&mb3_transfer.sysclk_work);
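+ /* Report the wakeup event and the monitored bits to the debug code. */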
+ prcmu_debug_register_mbox0_event(ev,
+ (mb0_transfer.req.dbb_irqs |
+ mb0_transfer.req.dbb_wakeups |
+ WAKEUP_BIT_AC_WAKE_ACK |
+ WAKEUP_BIT_AC_SLEEP_ACK |
+ WAKEUP_BIT_SYSCLK_OK));
+
ev &= mb0_transfer.req.dbb_irqs;
for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
@@ -2561,6 +2623,7 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data)
bits -= MBOX_BIT(n);
if (read_mailbox[n]())
r = IRQ_WAKE_THREAD;
+ prcmu_debug_register_interrupt(n);
}
}
return r;
@@ -2646,29 +2709,38 @@ static char *fw_project_name(u8 project)
void __init db8500_prcmu_early_init(void)
{
unsigned int i;
- if (cpu_is_u8500v2()) {
- void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
-
- if (tcpm_base != NULL) {
- u32 version;
- version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
- fw_info.version.project = version & 0xFF;
- fw_info.version.api_version = (version >> 8) & 0xFF;
- fw_info.version.func_version = (version >> 16) & 0xFF;
- fw_info.version.errata = (version >> 24) & 0xFF;
- fw_info.valid = true;
- pr_info("PRCMU firmware: %s, version %d.%d.%d\n",
- fw_project_name(fw_info.version.project),
- (version >> 8) & 0xFF, (version >> 16) & 0xFF,
- (version >> 24) & 0xFF);
- iounmap(tcpm_base);
- }
+ void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
+ void __iomem *sec_base;
+
+ if (tcpm_base != NULL) {
+ u32 version;
+ version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
+ fw_info.version.project = version & 0xFF;
+ fw_info.version.api_version = (version >> 8) & 0xFF;
+ fw_info.version.func_version = (version >> 16) & 0xFF;
+ fw_info.version.errata = (version >> 24) & 0xFF;
+ fw_info.valid = true;
+ pr_info("PRCMU firmware: %s, version %d.%d.%d\n",
+ fw_project_name(fw_info.version.project),
+ (version >> 8) & 0xFF, (version >> 16) & 0xFF,
+ (version >> 24) & 0xFF);
+ iounmap(tcpm_base);
+ }
- tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
- } else {
- pr_err("prcmu: Unsupported chip version\n");
- BUG();
+ tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
+
+ /*
+ * Copy the value of the reset status register and, if it indicates
+ * an APE software reset, also the software reset code.
+ */
+ sec_base = ioremap_nocache(U8500_PRCMU_SEC_BASE, SZ_4K);
+ if (sec_base != NULL) {
+ reset_status_copy = readl(sec_base +
+ DB8500_SEC_PRCM_RESET_STATUS);
+ iounmap(sec_base);
}
+ if (reset_status_copy & DB8500_SEC_PRCM_RESET_STATUS_APE_SOFTWARE_RESET)
+ reset_code_copy = readw(tcdm_base + PRCM_SW_RST_REASON);
spin_lock_init(&mb0_transfer.lock);
spin_lock_init(&mb0_transfer.dbb_irqs_lock);
@@ -2734,6 +2806,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
REGULATOR_SUPPLY("vcore", "uart2"),
REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
+ REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
};
static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
@@ -2743,7 +2816,7 @@ static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
};
static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
- REGULATOR_SUPPLY("vsupply", "b2r2_bus"),
+ REGULATOR_SUPPLY("vsupply", "b2r2_core"),
REGULATOR_SUPPLY("vsupply", "mcde"),
};
@@ -2962,9 +3035,6 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
{
int err = 0;
- if (ux500_is_svp())
- return -ENODEV;
-
init_prcm_registers();
/* Clean up the mailbox interrupts after pre-kernel code. */
@@ -2978,8 +3048,7 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
goto no_irq_return;
}
- if (cpu_is_u8500v20_or_later())
- prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+ prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
ARRAY_SIZE(db8500_prcmu_devs), NULL,
diff --git a/drivers/mfd/dbx500-prcmu-regs.h b/drivers/mfd/dbx500-prcmu-regs.h
index 3a0bf91d778..0835a57dac1 100644
--- a/drivers/mfd/dbx500-prcmu-regs.h
+++ b/drivers/mfd/dbx500-prcmu-regs.h
@@ -34,6 +34,9 @@
#define PRCM_PER5CLK_MGT PRCM_CLK_MGT(0x038)
#define PRCM_PER6CLK_MGT PRCM_CLK_MGT(0x03C)
#define PRCM_PER7CLK_MGT PRCM_CLK_MGT(0x040)
+#define PRCM_PWMCLK_MGT PRCM_CLK_MGT(0x044) /* for DB5500 */
+#define PRCM_IRDACLK_MGT PRCM_CLK_MGT(0x048) /* for DB5500 */
+#define PRCM_IRRCCLK_MGT PRCM_CLK_MGT(0x04C) /* for DB5500 */
#define PRCM_LCDCLK_MGT PRCM_CLK_MGT(0x044)
#define PRCM_BMLCLK_MGT PRCM_CLK_MGT(0x04C)
#define PRCM_HSITXCLK_MGT PRCM_CLK_MGT(0x050)
@@ -124,7 +127,6 @@
#define PRCM_ITSTATUS4 (_PRCMU_BASE + 0x168)
#define PRCM_ITSTATUS5 (_PRCMU_BASE + 0x484)
#define PRCM_ITCLEAR5 (_PRCMU_BASE + 0x488)
-#define PRCM_ARMIT_MASKXP70_IT (_PRCMU_BASE + 0x1018)
/* System reset register */
#define PRCM_APE_SOFTRST (_PRCMU_BASE + 0x228)
@@ -247,4 +249,17 @@
/* System reset register */
#define PRCM_APE_SOFTRST (_PRCMU_BASE + 0x228)
+/* Secure read-only registers */
+#define DB8500_SEC_PRCM_RESET_STATUS 0x03C
+#define DB8500_SEC_PRCM_RESET_STATUS_A9_CPU0_WATCHDOG_RESET BIT(0)
+#define DB8500_SEC_PRCM_RESET_STATUS_A9_CPU1_WATCHDOG_RESET BIT(1)
+#define DB8500_SEC_PRCM_RESET_STATUS_APE_SOFTWARE_RESET BIT(2)
+#define DB8500_SEC_PRCM_RESET_STATUS_APE_RESET BIT(3)
+#define DB8500_SEC_PRCM_RESET_STATUS_SECURE_WATCHDOG BIT(4)
+#define DB8500_SEC_PRCM_RESET_STATUS_POWER_ON_RESET BIT(5)
+#define DB8500_SEC_PRCM_RESET_STATUS_A9_RESTART BIT(6)
+#define DB8500_SEC_PRCM_RESET_STATUS_APE_RESTART BIT(7)
+#define DB8500_SEC_PRCM_RESET_STATUS_MODEM_SOFTWARE_RESET BIT(8)
+#define DB8500_SEC_PRCM_RESET_STATUS_BOOT_ENGI BIT(16)
+
#endif /* __DB8500_PRCMU_REGS_H */
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 2dd8d49cb30..6b8f9417c00 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -772,7 +772,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
ret = stmpe_block_read(stmpe, israddr, num, isr);
if (ret < 0)
return IRQ_NONE;
-
+back:
for (i = 0; i < num; i++) {
int bank = num - i - 1;
u8 status = isr[i];
@@ -794,6 +794,22 @@ static irqreturn_t stmpe_irq(int irq, void *data)
stmpe_reg_write(stmpe, israddr + i, clear);
}
+ /*
+ * It may happen that interrupt sources do not show up on the
+ * first status read, so read one more time.
+ */
+ ret = stmpe_block_read(stmpe, israddr, num, isr);
+ if (ret >= 0) {
+ for (i = 0; i < num; i++) {
+ int bank = num - i - 1;
+ u8 status = isr[i];
+
+ status &= stmpe->ier[bank];
+ if (status)
+ goto back;
+ }
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/mfd/tc35892.c b/drivers/mfd/tc35892.c
new file mode 100644
index 00000000000..91211f29623
--- /dev/null
+++ b/drivers/mfd/tc35892.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tc35892.h>
+
+#define TC35892_CLKMODE_MODCTL_SLEEP 0x0
+#define TC35892_CLKMODE_MODCTL_OPERATION (1 << 0)
+
+/**
+ * tc35892_reg_read() - read a single TC35892 register
+ * @tc35892: Device to read from
+ * @reg: Register to read
+ */
+int tc35892_reg_read(struct tc35892 *tc35892, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(tc35892->i2c, reg);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_read);
+
+/**
+ * tc35892_reg_write() - write a single TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @data: Value to write
+ */
+int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(tc35892->i2c, reg, data);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_write);
+
+/**
+ * tc35892_block_read() - read multiple TC35892 registers
+ * @tc35892: Device to read from
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Buffer to write to
+ */
+int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length, u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(tc35892->i2c, reg, length, values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_read);
+
+/**
+ * tc35892_block_write() - write multiple TC35892 registers
+ * @tc35892: Device to write to
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Values to write
+ */
+int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length,
+ const u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(tc35892->i2c, reg, length,
+ values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_write);
+
+/**
+ * tc35892_set_bits() - set the value of a bitfield in a TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @mask: Mask of bits to set
+ * @val: Value to set
+ */
+int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val)
+{
+ int ret;
+
+ mutex_lock(&tc35892->lock);
+
+ ret = tc35892_reg_read(tc35892, reg);
+ if (ret < 0)
+ goto out;
+
+ ret &= ~mask;
+ ret |= val;
+
+ ret = tc35892_reg_write(tc35892, reg, ret);
+
+out:
+ mutex_unlock(&tc35892->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_set_bits);
+
+static struct resource gpio_resources[] = {
+ {
+ .start = TC35892_INT_GPIIRQ,
+ .end = TC35892_INT_GPIIRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell tc35892_devs[] = {
+ {
+ .name = "tc35892-gpio",
+ .num_resources = ARRAY_SIZE(gpio_resources),
+ .resources = &gpio_resources[0],
+ },
+};
+
+static irqreturn_t tc35892_irq(int irq, void *data)
+{
+ struct tc35892 *tc35892 = data;
+ int status;
+
+again:
+ status = tc35892_reg_read(tc35892, TC35892_IRQST);
+ if (status < 0)
+ return IRQ_NONE;
+
+ while (status) {
+ int bit = __ffs(status);
+
+ handle_nested_irq(tc35892->irq_base + bit);
+ status &= ~(1 << bit);
+ }
+
+ /*
+ * A dummy read or write (to any register) appears to be necessary to
+ * have the last interrupt clear (for example, GPIO IC write) take
+ * effect. In such a case, recheck for any interrupt which is still
+ * pending.
+ */
+ status = tc35892_reg_read(tc35892, TC35892_IRQST);
+ if (status)
+ goto again;
+
+ return IRQ_HANDLED;
+}
+
+static void tc35892_irq_dummy(struct irq_data *d)
+{
+ /* No mask/unmask at this level */
+}
+
+static struct irq_chip tc35892_irq_chip = {
+ .name = "tc35892",
+ .irq_mask = tc35892_irq_dummy,
+ .irq_unmask = tc35892_irq_dummy,
+};
+
+static int tc35892_irq_init(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+ irq_set_chip_data(irq, tc35892);
+ irq_set_chip_and_handler(irq, &tc35892_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void tc35892_irq_remove(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+ }
+}
+
+static int tc35892_chip_init(struct tc35892 *tc35892)
+{
+ int manf, ver, ret;
+
+ manf = tc35892_reg_read(tc35892, TC35892_MANFCODE);
+ if (manf < 0)
+ return manf;
+
+ ver = tc35892_reg_read(tc35892, TC35892_VERSION);
+ if (ver < 0)
+ return ver;
+
+ if (manf != TC35892_MANFCODE_MAGIC) {
+ dev_err(tc35892->dev, "unknown manufacturer: %#x\n", manf);
+ return -EINVAL;
+ }
+
+ dev_info(tc35892->dev, "manufacturer: %#x, version: %#x\n", manf, ver);
+
+ /*
+ * Put everything except the IRQ module into reset;
+ * also spare the GPIO module for any pin initialization
+ * done during pre-kernel boot
+ */
+ ret = tc35892_reg_write(tc35892, TC35892_RSTCTRL,
+ TC35892_RSTCTRL_TIMRST
+ | TC35892_RSTCTRL_ROTRST
+ | TC35892_RSTCTRL_KBDRST);
+ if (ret < 0)
+ return ret;
+
+ /* Clear the reset interrupt. */
+ return tc35892_reg_write(tc35892, TC35892_RSTINTCLR, 0x1);
+}
+
+static int __devinit tc35892_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tc35892_platform_data *pdata = i2c->dev.platform_data;
+ struct tc35892 *tc35892;
+ int ret;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EIO;
+
+ tc35892 = kzalloc(sizeof(struct tc35892), GFP_KERNEL);
+ if (!tc35892)
+ return -ENOMEM;
+
+ mutex_init(&tc35892->lock);
+
+ tc35892->dev = &i2c->dev;
+ tc35892->i2c = i2c;
+ tc35892->pdata = pdata;
+ tc35892->irq_base = pdata->irq_base;
+ tc35892->num_gpio = id->driver_data;
+
+ i2c_set_clientdata(i2c, tc35892);
+
+ ret = tc35892_chip_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = tc35892_irq_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(tc35892->i2c->irq, NULL, tc35892_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "tc35892", tc35892);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to request IRQ: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = mfd_add_devices(tc35892->dev, -1, tc35892_devs,
+ ARRAY_SIZE(tc35892_devs), NULL,
+ tc35892->irq_base);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to add children\n");
+ goto out_freeirq;
+ }
+
+ return 0;
+
+out_freeirq:
+ free_irq(tc35892->i2c->irq, tc35892);
+out_removeirq:
+ tc35892_irq_remove(tc35892);
+out_free:
+ kfree(tc35892);
+ return ret;
+}
+
+static int __devexit tc35892_remove(struct i2c_client *client)
+{
+ struct tc35892 *tc35892 = i2c_get_clientdata(client);
+
+ mfd_remove_devices(tc35892->dev);
+
+ free_irq(tc35892->i2c->irq, tc35892);
+ tc35892_irq_remove(tc35892);
+
+ kfree(tc35892);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
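+/*
+ * Register values applied before entering sleep. The current values are
+ * saved in sleep_regs_backup first and restored again on resume.
+ */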
+static u32 sleep_regs[] = {
+ TC35892_IOPC0_L,
+ TC35892_IOPC0_H,
+ TC35892_IOPC1_L,
+ TC35892_IOPC1_H,
+ TC35892_IOPC2_L,
+ TC35892_IOPC2_H,
+ TC35892_DRIVE0_L,
+ TC35892_DRIVE0_H,
+ TC35892_DRIVE1_L,
+ TC35892_DRIVE1_H,
+ TC35892_DRIVE2_L,
+ TC35892_DRIVE2_H,
+ TC35892_DRIVE3,
+ TC35892_GPIODATA0,
+ TC35892_GPIOMASK0,
+ TC35892_GPIODATA1,
+ TC35892_GPIOMASK1,
+ TC35892_GPIODATA2,
+ TC35892_GPIOMASK2,
+ TC35892_GPIODIR0,
+ TC35892_GPIODIR1,
+ TC35892_GPIODIR2,
+ TC35892_GPIOIE0,
+ TC35892_GPIOIE1,
+ TC35892_GPIOIE2,
+ TC35892_RSTCTRL,
+ TC35892_CLKCFG,
+};
+
+static u8 sleep_regs_val[] = {
+ 0x00, /* TC35892_IOPC0_L */
+ 0x00, /* TC35892_IOPC0_H */
+ 0x00, /* TC35892_IOPC1_L */
+ 0x00, /* TC35892_IOPC1_H */
+ 0x00, /* TC35892_IOPC2_L */
+ 0x00, /* TC35892_IOPC2_H */
+ 0xff, /* TC35892_DRIVE0_L */
+ 0xff, /* TC35892_DRIVE0_H */
+ 0xff, /* TC35892_DRIVE1_L */
+ 0xff, /* TC35892_DRIVE1_H */
+ 0xff, /* TC35892_DRIVE2_L */
+ 0xff, /* TC35892_DRIVE2_H */
+ 0x0f, /* TC35892_DRIVE3 */
+ 0x80, /* TC35892_GPIODATA0 */
+ 0x80, /* TC35892_GPIOMASK0 */
+ 0x80, /* TC35892_GPIODATA1 */
+ 0x80, /* TC35892_GPIOMASK1 */
+ 0x06, /* TC35892_GPIODATA2 */
+ 0x06, /* TC35892_GPIOMASK2 */
+ 0xf0, /* TC35892_GPIODIR0 */
+ 0xe0, /* TC35892_GPIODIR1 */
+ 0xee, /* TC35892_GPIODIR2 */
+ 0x0f, /* TC35892_GPIOIE0 */
+ 0x1f, /* TC35892_GPIOIE1 */
+ 0x11, /* TC35892_GPIOIE2 */
+ 0x0f, /* TC35892_RSTCTRL */
+ 0xb0 /* TC35892_CLKCFG */
+};
+
+static u8 sleep_regs_backup[ARRAY_SIZE(sleep_regs)];
+
+static int tc35892_suspend(struct device *dev)
+{
+ struct tc35892 *tc35892 = dev_get_drvdata(dev);
+ struct i2c_client *client = tc35892->i2c;
+ int ret = 0;
+ int i, j;
+ int val;
+
+ /* Put the system to sleep mode */
+ if (!device_may_wakeup(&client->dev)) {
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ val = tc35892_reg_read(tc35892,
+ sleep_regs[i]);
+ if (val < 0)
+ goto out;
+
+ sleep_regs_backup[i] = (u8) (val & 0xff);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ ret = tc35892_reg_write(tc35892,
+ sleep_regs[i],
+ sleep_regs_val[i]);
+ if (ret < 0)
+ goto fail;
+
+ }
+
+ ret = tc35892_reg_write(tc35892,
+ TC35892_CLKMODE,
+ TC35892_CLKMODE_MODCTL_SLEEP);
+ }
+out:
+ return ret;
+fail:
+ for (j = 0; j <= i; j++) {
+ ret = tc35892_reg_write(tc35892,
+ sleep_regs[j],
+ sleep_regs_backup[j]);
+ if (ret < 0)
+ break;
+ }
+ return ret;
+}
+
+static int tc35892_resume(struct device *dev)
+{
+ struct tc35892 *tc35892 = dev_get_drvdata(dev);
+ struct i2c_client *client = tc35892->i2c;
+ int ret = 0;
+ int i;
+
+ /* Enable the system into operation */
+ if (!device_may_wakeup(&client->dev)) {
+ ret = tc35892_reg_write(tc35892,
+ TC35892_CLKMODE,
+ TC35892_CLKMODE_MODCTL_OPERATION);
+ if (ret < 0)
+ goto out;
+
+ for (i = ARRAY_SIZE(sleep_regs) - 1; i >= 0; i--) {
+ ret = tc35892_reg_write(tc35892,
+ sleep_regs[i],
+ sleep_regs_backup[i]);
+ /* Not much to do here if we fail */
+ if (ret < 0)
+ break;
+ }
+ }
+out:
+ return ret;
+}
+
+static const struct dev_pm_ops tc35892_dev_pm_ops = {
+ .suspend = tc35892_suspend,
+ .resume = tc35892_resume,
+};
+#endif
+
+static const struct i2c_device_id tc35892_id[] = {
+ { "tc35892", 24 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc35892_id);
+
+static struct i2c_driver tc35892_driver = {
+ .driver.name = "tc35892",
+ .driver.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .driver.pm = &tc35892_dev_pm_ops,
+#endif
+ .probe = tc35892_probe,
+ .remove = __devexit_p(tc35892_remove),
+ .id_table = tc35892_id,
+};
+
+static int __init tc35892_init(void)
+{
+ return i2c_add_driver(&tc35892_driver);
+}
+subsys_initcall(tc35892_init);
+
+static void __exit tc35892_exit(void)
+{
+ i2c_del_driver(&tc35892_driver);
+}
+module_exit(tc35892_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TC35892 MFD core driver");
+MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index de979742c6f..0e79fe2d214 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -358,16 +358,114 @@ static int __devexit tc3589x_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM
+
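+/*
+ * Register values applied before entering sleep. The current values are
+ * saved in sleep_regs_backup first and restored again on resume.
+ */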
+static u32 sleep_regs[] = {
+ TC3589x_IOPC0_L,
+ TC3589x_IOPC0_H,
+ TC3589x_IOPC1_L,
+ TC3589x_IOPC1_H,
+ TC3589x_IOPC2_L,
+ TC3589x_IOPC2_H,
+ TC3589x_DRIVE0_L,
+ TC3589x_DRIVE0_H,
+ TC3589x_DRIVE1_L,
+ TC3589x_DRIVE1_H,
+ TC3589x_DRIVE2_L,
+ TC3589x_DRIVE2_H,
+ TC3589x_DRIVE3,
+ TC3589x_GPIODATA0,
+ TC3589x_GPIOMASK0,
+ TC3589x_GPIODATA1,
+ TC3589x_GPIOMASK1,
+ TC3589x_GPIODATA2,
+ TC3589x_GPIOMASK2,
+ TC3589x_GPIODIR0,
+ TC3589x_GPIODIR1,
+ TC3589x_GPIODIR2,
+ TC3589x_GPIOIE0,
+ TC3589x_GPIOIE1,
+ TC3589x_GPIOIE2,
+ TC3589x_RSTCTRL,
+ TC3589x_CLKCFG,
+};
+
+static u8 sleep_regs_val[] = {
+ 0x00, /* TC3589x_IOPC0_L */
+ 0x00, /* TC3589x_IOPC0_H */
+ 0x00, /* TC3589x_IOPC1_L */
+ 0x00, /* TC3589x_IOPC1_H */
+ 0x00, /* TC3589x_IOPC2_L */
+ 0x00, /* TC3589x_IOPC2_H */
+ 0xff, /* TC3589x_DRIVE0_L */
+ 0xff, /* TC3589x_DRIVE0_H */
+ 0xff, /* TC3589x_DRIVE1_L */
+ 0xff, /* TC3589x_DRIVE1_H */
+ 0xff, /* TC3589x_DRIVE2_L */
+ 0xff, /* TC3589x_DRIVE2_H */
+ 0x0f, /* TC3589x_DRIVE3 */
+ 0x80, /* TC3589x_GPIODATA0 */
+ 0x80, /* TC3589x_GPIOMASK0 */
+ 0x80, /* TC3589x_GPIODATA1 */
+ 0x80, /* TC3589x_GPIOMASK1 */
+ 0x06, /* TC3589x_GPIODATA2 */
+ 0x06, /* TC3589x_GPIOMASK2 */
+ 0xf0, /* TC3589x_GPIODIR0 */
+ 0xe0, /* TC3589x_GPIODIR1 */
+ 0xee, /* TC3589x_GPIODIR2 */
+ 0x0f, /* TC3589x_GPIOIE0 */
+ 0x1f, /* TC3589x_GPIOIE1 */
+ 0x11, /* TC3589x_GPIOIE2 */
+ 0x0f, /* TC3589x_RSTCTRL */
+ 0xb0 /* TC3589x_CLKCFG */
+};
+
+static u8 sleep_regs_backup[ARRAY_SIZE(sleep_regs)];
+
static int tc3589x_suspend(struct device *dev)
{
struct tc3589x *tc3589x = dev_get_drvdata(dev);
struct i2c_client *client = tc3589x->i2c;
int ret = 0;
+ int i, j;
+ int val;
+
+ /* Put the system to sleep mode */
+ if (!device_may_wakeup(&client->dev)) {
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ val = tc3589x_reg_read(tc3589x,
+ sleep_regs[i]);
+ if (val < 0)
+ goto out;
+
+ sleep_regs_backup[i] = (u8) (val & 0xff);
+ }
- /* put the system to sleep mode */
- if (!device_may_wakeup(&client->dev))
- ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE,
- TC3589x_CLKMODE_MODCTL_SLEEP);
+ for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) {
+ ret = tc3589x_reg_write(tc3589x,
+ sleep_regs[i],
+ sleep_regs_val[i]);
+ if (ret < 0)
+ goto fail;
+
+ }
+
+ ret = tc3589x_reg_write(tc3589x,
+ TC3589x_CLKMODE,
+ TC3589x_CLKMODE_MODCTL_SLEEP);
+ } else {
+ enable_irq_wake(client->irq);
+ }
+out:
+ return ret;
+fail:
+ for (j = 0; j <= i; j++) {
+ ret = tc3589x_reg_write(tc3589x,
+ sleep_regs[j],
+ sleep_regs_backup[j]);
+ if (ret < 0)
+ break;
+ }
return ret;
}
@@ -377,12 +475,29 @@ static int tc3589x_resume(struct device *dev)
struct tc3589x *tc3589x = dev_get_drvdata(dev);
struct i2c_client *client = tc3589x->i2c;
int ret = 0;
+ int i;
- /* enable the system into operation */
+ /* Enable the system into operation */
if (!device_may_wakeup(&client->dev))
- ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE,
- TC3589x_CLKMODE_MODCTL_OPERATION);
-
+ {
+ ret = tc3589x_reg_write(tc3589x,
+ TC3589x_CLKMODE,
+ TC3589x_CLKMODE_MODCTL_OPERATION);
+ if (ret < 0)
+ goto out;
+
+ for (i = ARRAY_SIZE(sleep_regs) - 1; i >= 0; i--) {
+ ret = tc3589x_reg_write(tc3589x,
+ sleep_regs[i],
+ sleep_regs_backup[i]);
+ /* Not much to do here if we fail */
+ if (ret < 0)
+ break;
+ }
+ } else {
+ disable_irq_wake(client->irq);
+ }
+out:
return ret;
}
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index a293b978e27..d7b9e0c60ea 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -195,6 +195,7 @@ static int __devinit tps6105x_probe(struct i2c_client *client,
return 0;
fail:
+ i2c_set_clientdata(client, NULL);
kfree(tps6105x);
return ret;
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c7795096d43..f92560ef750 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -461,6 +461,40 @@ config BMP085
To compile this driver as a module, choose M here: the
module will be called bmp085.
+config DISPDEV
+ bool "Display overlay device"
+ depends on FB_MCDE
+ default n
+ help
+ This driver provides a way to use a second overlay for a display (in
+ addition to the framebuffer). The device allows for registration of
+ userspace buffers to be used with the overlay.
+
+config COMPDEV
+ bool "Display composition device"
+ depends on FB_MCDE && HWMEM
+ default n
+ help
+ This driver provides a way to use several overlays for a display,
+ replacing the use of the framebuffer. The device allows posting
+ userspace buffers to be used with the overlays.
+
+config CLONEDEV
+ bool "Display cloning device"
+ depends on FB_MCDE && HWMEM && COMPDEV
+ default n
+ help
+ This driver provides a way to clone content between two compdev
+ devices.
+
+config CLONEDEV_DEBUG
+ bool "Display cloning device debug"
+ depends on CLONEDEV
+ default n
+ help
+ Enable debug output (the driver is built with -DDEBUG) for the
+ display cloning device.
+
config PCH_PHUB
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
depends on PCI
@@ -481,6 +515,14 @@ config PCH_PHUB
To compile this driver as a module, choose M here: the module will
be called pch_phub.
+config HWMEM
+ bool "Hardware memory driver"
+ default n
+ help
+ This driver provides a way to allocate contiguous system memory which
+ can be used by hardware. It also enables accessing hwmem-allocated
+ memory buffers through a secure id that can be shared across processes.
+
config USB_SWITCH_FSA9480
tristate "FSA9480 USB Switch"
depends on I2C
@@ -498,6 +540,7 @@ config MAX8997_MUIC
Maxim MAX8997 PMIC.
The MAX8997 MUIC is a USB port accessory detector and switch.
+source "drivers/misc/Kconfig.stm"
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Kconfig.stm b/drivers/misc/Kconfig.stm
new file mode 100644
index 00000000000..d509c85c79f
--- /dev/null
+++ b/drivers/misc/Kconfig.stm
@@ -0,0 +1,120 @@
+menuconfig STM_TRACE
+ bool "STM MIPI Trace driver"
+ depends on ARCH_U8500
+ help
+ Simple System Trace Module driver. It allows the STM to be used and
+ configured, either from kernel space or from user space.
+
+if STM_TRACE
+
+config STM_NUMBER_OF_CHANNEL
+ int
+ default 512 if ARCH_U8500
+ default 256
+ help
+ Maximum number of channels; always a multiple of 256.
+
+config STM_DEFAULT_MASTERS_MODES
+ hex "channel mode"
+ default 0xffffffff
+ help
+ Default configuration mask for enabling hardware mode tracing.
+
+config STM_PRINTK
+ bool "printk support"
+ depends on STM_TRACE
+ help
+ Duplicate printk output on the STM printk channel and activate stm_printk.
+
+config STM_PRINTK_CHANNEL
+ int "printk channel"
+ range 0 255
+ depends on STM_PRINTK
+ default 255
+ help
+ STM printk channel number
+
+config STM_FTRACE
+ bool "functions tracing"
+ depends on FTRACE
+ default y
+ help
+ Output function tracing on STM dedicated channel
+
+config STM_FTRACE_CHANNEL
+ int "ftrace channel"
+ range 0 255
+ depends on STM_FTRACE
+ default 254
+ help
+ STM ftrace channel number
+
+config STM_CTX_SWITCH
+ bool "Context switch tracing"
+ depends on CONTEXT_SWITCH_TRACER
+ default y
+ help
+ Output scheduler context switch on STM dedicated channel
+
+config STM_CTX_SWITCH_CHANNEL
+ int "Context switch channel"
+ range 0 255
+ depends on STM_CTX_SWITCH
+ default 253
+ help
+ STM Context switch channel number
+
+config STM_WAKEUP
+ bool "Scheduler wakeup tracing"
+ depends on CONTEXT_SWITCH_TRACER
+ default y
+ help
+ Output scheduler wakeup on STM dedicated channel
+
+config STM_WAKEUP_CHANNEL
+ int "Wakeup channel"
+ range 0 255
+ depends on STM_WAKEUP
+ default 252
+ help
+ STM scheduler wakeup channel number
+
+config STM_STACK_TRACE
+ bool "Stack tracing"
+ depends on STACKTRACE
+ default y
+ help
+ Output stack tracing on STM dedicated channel
+
+config STM_STACK_TRACE_CHANNEL
+ int "Stack trace channel"
+ range 0 255
+ depends on STM_STACK_TRACE
+ default 251
+ help
+ STM stack trace channel number
+
+config STM_TRACE_PRINTK
+ bool "trace printk & binary printk support"
+ depends on TRACING
+ default y
+ help
+ Duplicate trace printk output on STM printk channel
+
+config STM_TRACE_PRINTK_CHANNEL
+ int "trace_printk channel"
+ range 0 255
+ depends on TRACING
+ default 250
+ help
+ STM trace_printk channel number
+
+config STM_TRACE_BPRINTK_CHANNEL
+ int "trace_bprintk channel"
+ range 0 255
+ depends on TRACING
+ default 249
+ help
+ STM trace binary printk channel number
+
+endif
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 3e1d80106f0..2741a01610f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -46,6 +46,11 @@ obj-y += ti-st/
obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
obj-y += lis3lv02d/
obj-y += carma/
+obj-$(CONFIG_STM_TRACE) += stm.o
+obj-$(CONFIG_HWMEM) += hwmem/
+obj-$(CONFIG_DISPDEV) += dispdev/
+obj-$(CONFIG_COMPDEV) += compdev/
+obj-$(CONFIG_CLONEDEV) += clonedev/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index d7a9aa14e5d..9d864e4db5a 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -8,10 +8,11 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pwm.h>
+#include <linux/clk.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/module.h>
-
+#include <linux/mfd/ab8500/pwmleds.h>
+
/*
* PWM Out generators
* Bank: 0x10
@@ -19,6 +20,11 @@
#define AB8500_PWM_OUT_CTRL1_REG 0x60
#define AB8500_PWM_OUT_CTRL2_REG 0x61
#define AB8500_PWM_OUT_CTRL7_REG 0x66
+#define AB8505_PWM_OUT_BLINK_CTRL1_REG 0x68
+#define AB8505_PWM_OUT_BLINK_CTRL4_REG 0x6B
+#define AB8505_PWM_OUT_BLINK_CTRL_DUTYBIT 4
+#define AB8505_PWM_OUT_BLINK_DUTYMASK (0x0F << AB8505_PWM_OUT_BLINK_CTRL_DUTYBIT)
+
/* backlight driver constants */
#define ENABLE_PWM 1
@@ -27,12 +33,73 @@
struct pwm_device {
struct device *dev;
struct list_head node;
+ struct clk *clk;
const char *label;
unsigned int pwm_id;
+ unsigned int num_pwm;
+ unsigned int blink_en;
+ struct ab8500 *parent;
+ bool clk_enabled;
};
static LIST_HEAD(pwm_list);
+int pwm_config_blink(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ int ret;
+ unsigned int value;
+ u8 reg;
+
+ if ((!is_ab8505(pwm->parent)) || (!pwm->blink_en)) {
+ dev_err(pwm->dev, "setting blinking for this "
+ "device not supported\n");
+ return -EINVAL;
+ }
+ /*
+ * get the period value that is to be written to
+ * AB8500_PWM_OUT_BLINK_CTRL1 REGS[0:2]
+ */
+ value = period_ns & 0x07;
+ /*
+ * get blink duty value to be written to
+ * AB8500_PWM_OUT_BLINK_CTRL REGS[7:4]
+ */
+ value |= ((duty_ns << AB8505_PWM_OUT_BLINK_CTRL_DUTYBIT) &
+ AB8505_PWM_OUT_BLINK_DUTYMASK);
+
+ reg = AB8505_PWM_OUT_BLINK_CTRL1_REG + (pwm->pwm_id - 1);
+
+ ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC,
+ reg, (u8)value);
+ if (ret < 0)
+ dev_err(pwm->dev, "%s: Failed to config PWM blink,Error %d\n",
+ pwm->label, ret);
+ return ret;
+}
+EXPORT_SYMBOL(pwm_config_blink);
+
+int pwm_blink_ctrl(struct pwm_device *pwm , int enable)
+{
+ int ret;
+
+ if ((!is_ab8505(pwm->parent)) || (!pwm->blink_en)) {
+ dev_err(pwm->dev, "setting blinking for this "
+ "device not supported\n");
+ return -EINVAL;
+ }
+ /*
+ * Enable/disable blinking feature for corresponding PWMOUT
+ * channel depending on value of enable.
+ */
+ ret = abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8505_PWM_OUT_BLINK_CTRL4_REG,
+ 1 << (pwm->pwm_id-1), enable << (pwm->pwm_id-1));
+ if (ret < 0)
+ dev_err(pwm->dev, "%s: Failed to control PWM blink,Error %d\n",
+ pwm->label, ret);
+ return ret;
+}
+EXPORT_SYMBOL(pwm_blink_ctrl);
+
int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
{
int ret = 0;
@@ -67,11 +134,19 @@ int pwm_enable(struct pwm_device *pwm)
{
int ret;
+ if (!pwm->clk_enabled) {
+ ret = clk_enable(pwm->clk);
+ if (ret < 0) {
+ dev_err(pwm->dev, "failed to enable clock\n");
+ return ret;
+ }
+ pwm->clk_enabled = true;
+ }
ret = abx500_mask_and_set_register_interruptible(pwm->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (pwm->pwm_id-1), ENABLE_PWM);
+ 1 << (pwm->pwm_id-1), 1 << (pwm->pwm_id-1));
if (ret < 0)
- dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n",
+ dev_err(pwm->dev, "%s: Failed to enable PWM, Error %d\n",
pwm->label, ret);
return ret;
}
@@ -84,9 +159,27 @@ void pwm_disable(struct pwm_device *pwm)
ret = abx500_mask_and_set_register_interruptible(pwm->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
1 << (pwm->pwm_id-1), DISABLE_PWM);
+ /*
+ * Workaround to properly disable the PWM: if the enable bit is not
+ * toggled, the PWM might output a 50/50 duty cycle even though it
+ * should be disabled. Error codes are negative, so OR them together
+ * so that a failure in any of the writes is reported below.
+ */
+ ret |= abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1),
+ ENABLE_PWM << (pwm->pwm_id-1));
+ ret |= abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1), DISABLE_PWM);
+
if (ret < 0)
dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n",
pwm->label, ret);
+ if (pwm->clk_enabled) {
+ clk_disable(pwm->clk);
+ pwm->clk_enabled = false;
+ }
+
return;
}
EXPORT_SYMBOL(pwm_disable);
@@ -94,7 +187,6 @@ EXPORT_SYMBOL(pwm_disable);
struct pwm_device *pwm_request(int pwm_id, const char *label)
{
struct pwm_device *pwm;
-
list_for_each_entry(pwm, &pwm_list, node) {
if (pwm->pwm_id == pwm_id) {
pwm->label = label;
@@ -113,30 +205,131 @@ void pwm_free(struct pwm_device *pwm)
}
EXPORT_SYMBOL(pwm_free);
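+
+/*
+ * sysfs hook (disable_blink): writing a PWM ID disables the blink
+ * feature for that output (supported on AB8505 only).
+ */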
+static ssize_t store_blink_status(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct pwm_device *pwm;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+ list_for_each_entry(pwm, &pwm_list, node) {
+ if (pwm->pwm_id == val)
+ break;
+ else {
+ /* check if the PWM ID is valid */
+ if (val > pwm->num_pwm) {
+ dev_err(pwm->dev, "Invalid PWM ID\n");
+ return -EINVAL;
+ }
+ }
+ }
+ if ((!is_ab8505(pwm->parent)) || (!pwm->blink_en)) {
+ dev_err(pwm->dev, "setting blinking for this "
+ "device not supported\n");
+ return -EINVAL;
+ }
+ /* Disable blink functionality */
+ pwm_blink_ctrl(pwm, 0);
+ return count;
+}
+
+static DEVICE_ATTR(disable_blink, S_IWUGO, NULL, store_blink_status);
+
+static struct attribute *pwmled_attributes[] = {
+ &dev_attr_disable_blink.attr,
+ NULL
+};
+
+static const struct attribute_group pwmled_attr_group = {
+ .attrs = pwmled_attributes,
+};
+
static int __devinit ab8500_pwm_probe(struct platform_device *pdev)
{
+ struct ab8500 *parent = dev_get_drvdata(pdev->dev.parent);
+ struct ab8500_platform_data *plat = dev_get_platdata(parent->dev);
+ struct ab8500_pwmled_platform_data *pdata;
struct pwm_device *pwm;
+ int ret = 0 , i;
+
+ /* get pwmled specific platform data */
+ if (!plat->pwmled) {
+ dev_err(&pdev->dev, "no pwm platform data supplied\n");
+ return -EINVAL;
+ }
+ pdata = plat->pwmled;
/*
* Nothing to be done in probe, this is required to get the
* device which is required for ab8500 read and write
*/
- pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
+ pwm = kzalloc(((sizeof(struct pwm_device)) * pdata->num_pwm),
+ GFP_KERNEL);
if (pwm == NULL) {
dev_err(&pdev->dev, "failed to allocate memory\n");
return -ENOMEM;
}
- pwm->dev = &pdev->dev;
- pwm->pwm_id = pdev->id;
- list_add_tail(&pwm->node, &pwm_list);
+ for (i = 0; i < pdata->num_pwm; i++) {
+ pwm[i].dev = &pdev->dev;
+ pwm[i].parent = parent;
+ pwm[i].blink_en = pdata->leds[i].blink_en;
+ pwm[i].pwm_id = pdata->leds[i].pwm_id;
+ pwm[i].num_pwm = pdata->num_pwm;
+ list_add_tail(&pwm[i].node, &pwm_list);
+ }
+ for (i = 0; i < pdata->num_pwm; i++) {
+ /* Implement sysfs only if blink is enabled */
+ if ((is_ab8505(pwm[i].parent)) && (pwm[i].blink_en)) {
+ /* sysfs implementation to disable the blink */
+ ret = sysfs_create_group(&pdev->dev.kobj,
+ &pwmled_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create"
+ " sysfs entries\n");
+ goto fail;
+ }
+ break;
+ }
+ }
+ pwm->clk = clk_get(pwm->dev, NULL);
+ if (IS_ERR(pwm->clk)) {
+ dev_err(pwm->dev, "clock request failed\n");
+ ret = PTR_ERR(pwm->clk);
+ goto err_clk;
+ }
platform_set_drvdata(pdev, pwm);
+ pwm->clk_enabled = false;
dev_dbg(pwm->dev, "pwm probe successful\n");
- return 0;
+ return ret;
+
+err_clk:
+ for (i = 0; i < pdata->num_pwm; i++) {
+ if ((is_ab8505(pwm[i].parent)) && (pwm[i].blink_en)) {
+ sysfs_remove_group(&pdev->dev.kobj,
+ &pwmled_attr_group);
+ break;
+ }
+ }
+fail:
+ for (i = 0; i < pdata->num_pwm; i++)
+ list_del(&pwm[i].node);
+ kfree(pwm);
+ return ret;
}
static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
{
struct pwm_device *pwm = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < pwm->num_pwm; i++) {
+ if ((is_ab8505(pwm[i].parent)) && (pwm[i].blink_en)) {
+ sysfs_remove_group(&pdev->dev.kobj,
+ &pwmled_attr_group);
+ break;
+ }
+ }
list_del(&pwm->node);
+ clk_put(pwm->clk);
dev_dbg(&pdev->dev, "pwm driver removed\n");
kfree(pwm);
return 0;
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index 54f6f39f990..1035cb37695 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -18,11 +18,17 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/i2c.h>
+#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
#define BH1780_REG_CONTROL 0x80
#define BH1780_REG_PARTID 0x8A
@@ -40,11 +46,20 @@
struct bh1780_data {
struct i2c_client *client;
+ struct regulator *regulator;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
int power_state;
/* lock for sysfs operations */
struct mutex lock;
};
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void bh1780_early_suspend(struct early_suspend *ddata);
+static void bh1780_late_resume(struct early_suspend *ddata);
+#endif
+
static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
{
int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
@@ -72,6 +87,9 @@ static ssize_t bh1780_show_lux(struct device *dev,
struct bh1780_data *ddata = platform_get_drvdata(pdev);
int lsb, msb;
+ if (ddata->power_state == BH1780_POFF)
+ return -EINVAL;
+
lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
if (lsb < 0)
return lsb;
@@ -89,13 +107,9 @@ static ssize_t bh1780_show_power_state(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct bh1780_data *ddata = platform_get_drvdata(pdev);
- int state;
-
- state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
- if (state < 0)
- return state;
- return sprintf(buf, "%d\n", state & BH1780_POWMASK);
+ /* we already maintain a sw state */
+ return sprintf(buf, "%d\n", ddata->power_state);
}
static ssize_t bh1780_store_power_state(struct device *dev,
@@ -104,7 +118,7 @@ static ssize_t bh1780_store_power_state(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct bh1780_data *ddata = platform_get_drvdata(pdev);
- unsigned long val;
+ long val;
int error;
error = strict_strtoul(buf, 0, &val);
@@ -114,15 +128,25 @@ static ssize_t bh1780_store_power_state(struct device *dev,
if (val < BH1780_POFF || val > BH1780_PON)
return -EINVAL;
+ if (ddata->power_state == val)
+ return count;
+
mutex_lock(&ddata->lock);
+ if (ddata->power_state == BH1780_POFF)
+ regulator_enable(ddata->regulator);
+
error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
if (error < 0) {
mutex_unlock(&ddata->lock);
+ if (ddata->power_state == BH1780_POFF)
+ regulator_disable(ddata->regulator);
return error;
}
- msleep(BH1780_PON_DELAY);
+ if (val == BH1780_POFF)
+ regulator_disable(ddata->regulator);
+
+ mdelay(BH1780_PON_DELAY);
ddata->power_state = val;
mutex_unlock(&ddata->lock);
@@ -131,7 +155,7 @@ static ssize_t bh1780_store_power_state(struct device *dev,
static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
-static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(power_state, S_IWUGO | S_IRUGO,
bh1780_show_power_state, bh1780_store_power_state);
static struct attribute *bh1780_attributes[] = {
@@ -153,21 +177,42 @@ static int __devinit bh1780_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) {
ret = -EIO;
- goto err_op_failed;
+ return ret;
}
ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL);
if (ddata == NULL) {
+ dev_err(&client->dev, "failed to alloc ddata\n");
ret = -ENOMEM;
- goto err_op_failed;
+ return ret;
}
ddata->client = client;
i2c_set_clientdata(client, ddata);
+ ddata->regulator = regulator_get(&client->dev, "vcc");
+ if (IS_ERR(ddata->regulator)) {
+ dev_err(&client->dev, "failed to get regulator\n");
+ ret = PTR_ERR(ddata->regulator);
+ goto free_ddata;
+ }
+
+ regulator_enable(ddata->regulator);
+
ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
- if (ret < 0)
- goto err_op_failed;
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read part ID\n");
+ goto disable_regulator;
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ddata->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ddata->early_suspend.suspend = bh1780_early_suspend;
+ ddata->early_suspend.resume = bh1780_late_resume;
+ register_early_suspend(&ddata->early_suspend);
+#endif
+
+ regulator_disable(ddata->regulator);
+ ddata->power_state = BH1780_POFF;
dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
(ret & BH1780_REVMASK));
@@ -175,12 +220,17 @@ static int __devinit bh1780_probe(struct i2c_client *client,
mutex_init(&ddata->lock);
ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
- if (ret)
- goto err_op_failed;
+ if (ret) {
+ dev_err(&client->dev, "failed to create sysfs group\n");
+ goto put_regulator;
+ }
return 0;
-
-err_op_failed:
+disable_regulator:
+ regulator_disable(ddata->regulator);
+put_regulator:
+ regulator_put(ddata->regulator);
+free_ddata:
kfree(ddata);
return ret;
}
@@ -196,50 +246,106 @@ static int __devexit bh1780_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
-static int bh1780_suspend(struct device *dev)
+#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_PM)
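+/*
+ * Common suspend/resume paths shared between dev_pm_ops and
+ * earlysuspend: power the chip down and release the regulator, or the
+ * reverse, unless the device is already powered off.
+ */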
+static int bh1780_do_suspend(struct bh1780_data *ddata)
{
- struct bh1780_data *ddata;
- int state, ret;
- struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
- ddata = i2c_get_clientdata(client);
- state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
- if (state < 0)
- return state;
+ mutex_lock(&ddata->lock);
- ddata->power_state = state & BH1780_POWMASK;
+ if (ddata->power_state == BH1780_POFF)
+ goto unlock;
- ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
- "CONTROL");
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF, "CONTROL");
if (ret < 0)
- return ret;
+ goto unlock;
- return 0;
+ if (ddata->regulator)
+ regulator_disable(ddata->regulator);
+unlock:
+ mutex_unlock(&ddata->lock);
+ return ret;
}
-static int bh1780_resume(struct device *dev)
+static int bh1780_do_resume(struct bh1780_data *ddata)
{
- struct bh1780_data *ddata;
- int state, ret;
- struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
- ddata = i2c_get_clientdata(client);
- state = ddata->power_state;
- ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
- "CONTROL");
+ mutex_lock(&ddata->lock);
+
+ if (ddata->power_state == BH1780_POFF)
+ goto unlock;
+ if (ddata->regulator)
+ regulator_enable(ddata->regulator);
+
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL,
+ ddata->power_state, "CONTROL");
+
+unlock:
+ mutex_unlock(&ddata->lock);
+ return ret;
+}
+#endif
+
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#ifdef CONFIG_PM
+static int bh1780_suspend(struct device *dev)
+{
+ struct bh1780_data *ddata = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = bh1780_do_suspend(ddata);
if (ret < 0)
- return ret;
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device\n");
- return 0;
+ return ret;
}
+
+static int bh1780_resume(struct device *dev)
+{
+ struct bh1780_data *ddata = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = bh1780_do_resume(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device\n");
+
+ return ret;
+}
+
static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
#define BH1780_PMOPS (&bh1780_pm)
+#endif /* CONFIG_PM */
#else
#define BH1780_PMOPS NULL
-#endif /* CONFIG_PM */
+static void bh1780_early_suspend(struct early_suspend *data)
+{
+ struct bh1780_data *ddata =
+ container_of(data, struct bh1780_data, early_suspend);
+ int ret;
+
+ ret = bh1780_do_suspend(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device\n");
+}
+
+static void bh1780_late_resume(struct early_suspend *data)
+{
+ struct bh1780_data *ddata =
+ container_of(data, struct bh1780_data, early_suspend);
+ int ret;
+
+ ret = bh1780_do_resume(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device\n");
+}
+#endif /*!CONFIG_HAS_EARLYSUSPEND */
static const struct i2c_device_id bh1780_id[] = {
{ "bh1780", 0 },
@@ -252,7 +358,9 @@ static struct i2c_driver bh1780_driver = {
.id_table = bh1780_id,
.driver = {
.name = "bh1780",
+#if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM))
.pm = BH1780_PMOPS,
+#endif
},
};
diff --git a/drivers/misc/clonedev/Makefile b/drivers/misc/clonedev/Makefile
new file mode 100644
index 00000000000..f84859dd3ee
--- /dev/null
+++ b/drivers/misc/clonedev/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CLONEDEV) += clonedev.o
+
+ifdef CONFIG_CLONEDEV_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/misc/clonedev/clonedev.c b/drivers/misc/clonedev/clonedev.c
new file mode 100644
index 00000000000..d3b770fd324
--- /dev/null
+++ b/drivers/misc/clonedev/clonedev.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Device for display cloning on external output.
+ *
+ * Author: Per-Daniel Olsson <per-daniel.olsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/ioctl.h>
+
+#include <linux/clonedev.h>
+
+#include <linux/compdev.h>
+#include <linux/mm.h>
+#include <video/mcde.h>
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_list_lock);
+
+struct clonedev {
+ struct mutex lock;
+ struct miscdevice mdev;
+ struct list_head list;
+ bool open;
+ struct compdev *src_compdev;
+ struct compdev *dst_compdev;
+ bool overlay_case;
+ struct compdev_size dst_size;
+ struct compdev_scene_info s_info;
+};
+
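+/*
+ * Scale the source rectangle to fill as much of the destination as
+ * possible while preserving the aspect ratio, and center the result.
+ */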
+static void best_fit(struct compdev_rect *src_rect,
+ struct compdev_size *dst_size,
+ struct compdev_img *img)
+{
+ /* aspect ratio in 26.6 fixed point */
+ int aspect = 1;
+ int dst_w;
+ int dst_h;
+
+ if (img->rotation == COMPDEV_ROT_90_CCW ||
+ img->rotation == COMPDEV_ROT_270_CCW)
+ aspect = (src_rect->height << 6) / src_rect->width;
+ else
+ aspect = (src_rect->width << 6) / src_rect->height;
+
+ dst_w = aspect * dst_size->height >> 6;
+ dst_h = dst_size->height;
+ img->dst_rect.y = 0;
+
+ if (dst_w > dst_size->width) {
+ /*
+ * Destination rectangle too wide.
+ * Clamp to image width. Keep aspect ratio.
+ */
+ dst_h = (dst_size->width << 6) / aspect;
+ dst_w = dst_size->width;
+ }
+
+ /* center the image */
+ if (dst_w < dst_size->width) {
+ int offset = (dst_size->width - dst_w) / 2;
+ img->dst_rect.x = offset;
+ }
+
+ if (dst_h < dst_size->height) {
+ int offset = (dst_size->height - dst_h) / 2;
+ img->dst_rect.y = offset;
+ }
+
+ img->dst_rect.width = dst_w;
+ img->dst_rect.height = dst_h;
+}
+
+static int clonedev_open(struct inode *inode, struct file *file)
+{
+ struct clonedev *cd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(cd, &dev_list, list)
+ if (cd->mdev.minor == iminor(inode))
+ break;
+
+ if (&cd->list == &dev_list) {
+ mutex_unlock(&dev_list_lock);
+ return -ENODEV;
+ }
+
+ if (cd->open) {
+ mutex_unlock(&dev_list_lock);
+ return -EBUSY;
+ }
+
+ cd->open = true;
+
+ mutex_unlock(&dev_list_lock);
+
+ file->private_data = cd;
+
+ return 0;
+}
+
+static int clonedev_release(struct inode *inode, struct file *file)
+{
+ struct clonedev *cd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(cd, &dev_list, list)
+ if (cd->mdev.minor == iminor(inode))
+ break;
+ mutex_unlock(&dev_list_lock);
+
+ if (&cd->list == &dev_list)
+ return -ENODEV;
+
+ cd->open = false;
+ return 0;
+}
+
+static long clonedev_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ struct clonedev *cd = (struct clonedev *)file->private_data;
+
+ mutex_lock(&cd->lock);
+
+ switch (cmd) {
+ case CLONEDEV_SET_MODE_IOC:
+ /* TODO: Get the user data */
+
+ break;
+
+ default:
+ ret = -ENOSYS;
+ }
+
+ mutex_unlock(&cd->lock);
+
+ return ret;
+}
+
+static const struct file_operations clonedev_fops = {
+ .open = clonedev_open,
+ .release = clonedev_release,
+ .unlocked_ioctl = clonedev_ioctl,
+};
+
+static void init_clonedev(struct clonedev *cd, const char *name)
+{
+ mutex_init(&cd->lock);
+ INIT_LIST_HEAD(&cd->list);
+
+ cd->mdev.minor = MISC_DYNAMIC_MINOR;
+ cd->mdev.name = name;
+ cd->mdev.fops = &clonedev_fops;
+}
+
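+/*
+ * Called for every buffer posted on the source compdev. In the overlay
+ * case (more than one image per scene) only the overlay buffer is
+ * forwarded to the clone output; otherwise the framebuffer image is
+ * forwarded. This matches how overlay_case is derived in the scene-info
+ * callback below.
+ */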
+static void clonedev_post_buffer_callback(void *data,
+ struct compdev_img *cb_img)
+{
+ struct clonedev *cd = (struct clonedev *)data;
+
+ mutex_lock(&cd->lock);
+
+	if (!cd->overlay_case ||
+			(cb_img->flags & COMPDEV_OVERLAY_FLAG)) {
+		struct compdev_img img;
+
+		img = *cb_img;
+
+		img.flags &= ~COMPDEV_BYPASS_FLAG;
+
+ if (cd->overlay_case)
+ img.rotation = cd->s_info.ovly_rotation;
+ else
+ img.rotation = cd->s_info.fb_rotation;
+
+ best_fit(&img.src_rect, &cd->dst_size, &img);
+
+ compdev_post_buffer(cd->dst_compdev, &img);
+ }
+ mutex_unlock(&cd->lock);
+}
+
+static void clonedev_post_scene_info_callback(void *data,
+ struct compdev_scene_info *s_info)
+{
+ struct clonedev *cd = (struct clonedev *)data;
+
+ mutex_lock(&cd->lock);
+ if (s_info->img_count > 1)
+ cd->overlay_case = true;
+ else
+ cd->overlay_case = false;
+
+ cd->s_info = *s_info;
+ cd->s_info.img_count = 1;
+ compdev_post_scene_info(cd->dst_compdev, &cd->s_info);
+ mutex_unlock(&cd->lock);
+}
+
+int clonedev_create(void)
+{
+ int ret;
+ struct clonedev *cd;
+
+	static int counter;
+
+	cd = kzalloc(sizeof(struct clonedev), GFP_KERNEL);
+	if (!cd)
+		return -ENOMEM;
+
+	/*
+	 * cd->name must stay valid for the lifetime of the misc device,
+	 * so it lives in the clonedev struct rather than on the stack.
+	 */
+	snprintf(cd->name, sizeof(cd->name), "%s%d",
+			CLONEDEV_DEFAULT_DEVICE_PREFIX, counter++);
+	init_clonedev(cd, cd->name);
+
+ ret = misc_register(&cd->mdev);
+ if (ret)
+ goto fail_register_misc;
+ mutex_lock(&dev_list_lock);
+ list_add_tail(&cd->list, &dev_list);
+ mutex_unlock(&dev_list_lock);
+
+ mutex_lock(&cd->lock);
+
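+	/*
+	 * Device index assumptions (not verified here): compdev 0 is the
+	 * primary display and compdev 1 is the external (clone) output.
+	 */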
+ compdev_get(0, &cd->src_compdev);
+ compdev_get(1, &cd->dst_compdev);
+ compdev_get_size(cd->dst_compdev, &cd->dst_size);
+
+ compdev_register_listener_callbacks(cd->src_compdev, (void *)cd,
+ &clonedev_post_buffer_callback,
+ &clonedev_post_scene_info_callback);
+
+ mutex_unlock(&cd->lock);
+ goto out;
+
+fail_register_misc:
+ kfree(cd);
+out:
+ return ret;
+}
+
+void clonedev_destroy(void)
+{
+ struct clonedev *cd;
+ struct clonedev *tmp;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(cd, tmp, &dev_list, list) {
+		/* Stop callbacks before dropping the compdev references */
+		compdev_deregister_callbacks(cd->src_compdev);
+		compdev_put(cd->src_compdev);
+		compdev_put(cd->dst_compdev);
+ list_del(&cd->list);
+ misc_deregister(&cd->mdev);
+ kfree(cd);
+ break;
+ }
+ mutex_unlock(&dev_list_lock);
+}
+
+static void clonedev_destroy_all(void)
+{
+ struct clonedev *cd;
+ struct clonedev *tmp;
+
+ mutex_lock(&dev_list_lock);
+	list_for_each_entry_safe(cd, tmp, &dev_list, list) {
+		compdev_deregister_callbacks(cd->src_compdev);
+		compdev_put(cd->src_compdev);
+		compdev_put(cd->dst_compdev);
+		list_del(&cd->list);
+		misc_deregister(&cd->mdev);
+		kfree(cd);
+	}
+ mutex_unlock(&dev_list_lock);
+
+ mutex_destroy(&dev_list_lock);
+}
+
+static int __init clonedev_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ mutex_init(&dev_list_lock);
+
+ return 0;
+}
+module_init(clonedev_init);
+
+static void __exit clonedev_exit(void)
+{
+ clonedev_destroy_all();
+ pr_info("%s\n", __func__);
+}
+module_exit(clonedev_exit);
+
+MODULE_AUTHOR("Per-Daniel Olsson <per-daniel.olsson@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Device for display cloning on external output");
+
diff --git a/drivers/misc/compdev/Makefile b/drivers/misc/compdev/Makefile
new file mode 100644
index 00000000000..b8385848712
--- /dev/null
+++ b/drivers/misc/compdev/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_COMPDEV) += compdev.o
+
+ifdef CONFIG_COMPDEV_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
+
diff --git a/drivers/misc/compdev/compdev.c b/drivers/misc/compdev/compdev.c
new file mode 100644
index 00000000000..d929a02c565
--- /dev/null
+++ b/drivers/misc/compdev/compdev.c
@@ -0,0 +1,1381 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Display overlay compositor device driver
+ *
+ * Author: Anders Bauer <anders.bauer@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * Modified: Per-Daniel Olsson <per-daniel.olsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+
+#include <linux/compdev.h>
+#include <linux/hwmem.h>
+#include <linux/mm.h>
+#include <video/mcde_dss.h>
+#include <video/b2r2_blt.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+#define BUFFER_CACHE_DEPTH 2
+#define NUM_COMPDEV_BUFS 2
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_list_lock);
+static int dev_counter;
+
+struct compdev_buffer {
+ struct hwmem_alloc *alloc;
+ enum compdev_ptr_type type;
+ u32 size;
+ u32 paddr; /* if pinned */
+};
+
+struct compdev_img_internal {
+ struct compdev_img img;
+ u32 ref_count;
+};
+
+struct compdev_blt_work {
+ struct work_struct work;
+ struct compdev_img *src_img;
+ struct compdev_img_internal *dst_img;
+ int blt_handle;
+ bool mcde_rotation;
+ struct device *dev;
+};
+
+struct compdev_post_callback_work {
+ struct work_struct work;
+ struct compdev_img *img;
+ post_buffer_callback pb_cb;
+ void *cb_data;
+ struct device *dev;
+};
+
+struct buffer_cache_context {
+ struct compdev_img_internal
+ *img[BUFFER_CACHE_DEPTH];
+ u8 index;
+ u8 unused_counter;
+ struct device *dev;
+};
+
+struct dss_context {
+ struct device *dev;
+ struct mcde_display_device *ddev;
+ struct mcde_overlay *ovly[NUM_COMPDEV_BUFS];
+ struct compdev_buffer ovly_buffer[NUM_COMPDEV_BUFS];
+ struct compdev_size phy_size;
+ enum mcde_display_rotation display_rotation;
+ enum compdev_rotation current_buffer_rotation;
+ int blt_handle;
+ u8 temp_img_count;
+ struct compdev_img_internal *temp_img[NUM_COMPDEV_BUFS];
+ struct buffer_cache_context cache_ctx;
+};
+
+struct compdev {
+ struct mutex lock;
+ struct miscdevice mdev;
+ struct device *dev;
+ struct list_head list;
+ struct dss_context dss_ctx;
+ u16 ref_count;
+ struct workqueue_struct *worker_thread;
+ int dev_index;
+ post_buffer_callback pb_cb;
+ post_scene_info_callback si_cb;
+ struct compdev_scene_info s_info;
+ u8 sync_count;
+ u8 image_count;
+ struct compdev_img *images[NUM_COMPDEV_BUFS];
+ struct completion fence;
+ void *cb_data;
+	bool mcde_rotation;
+	char name[16];
+};
+
+static struct compdev *compdevs[MAX_NBR_OF_COMPDEVS];
+
+static int compdev_post_buffers_dss(struct dss_context *dss_ctx,
+ struct compdev_img *img1, struct compdev_img *img2);
+
+
+static int compdev_open(struct inode *inode, struct file *file)
+{
+ struct compdev *cd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(cd, &dev_list, list)
+ if (cd->mdev.minor == iminor(inode))
+ break;
+
+ if (&cd->list == &dev_list) {
+ mutex_unlock(&dev_list_lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&dev_list_lock);
+ file->private_data = cd;
+ return 0;
+}
+
+static int disable_overlay(struct mcde_overlay *ovly)
+{
+ struct mcde_overlay_info info;
+
+ mcde_dss_get_overlay_info(ovly, &info);
+ if (info.paddr != 0) {
+ /* Set the pointer to zero to disable the overlay */
+ info.paddr = 0;
+ mcde_dss_apply_overlay(ovly, &info);
+ }
+ return 0;
+}
+
+static int compdev_release(struct inode *inode, struct file *file)
+{
+ struct compdev *cd = NULL;
+ int i;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(cd, &dev_list, list)
+ if (cd->mdev.minor == iminor(inode))
+ break;
+ mutex_unlock(&dev_list_lock);
+
+ if (&cd->list == &dev_list)
+ return -ENODEV;
+
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++) {
+ disable_overlay(cd->dss_ctx.ovly[i]);
+		if (cd->dss_ctx.ovly_buffer[i].paddr &&
+			cd->dss_ctx.ovly_buffer[i].type ==
+			COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET) {
+			hwmem_unpin(cd->dss_ctx.ovly_buffer[i].alloc);
+			/* Drop the reference taken at resolve time */
+			hwmem_release(cd->dss_ctx.ovly_buffer[i].alloc);
+		}
+
+ cd->dss_ctx.ovly_buffer[i].alloc = NULL;
+ cd->dss_ctx.ovly_buffer[i].size = 0;
+ cd->dss_ctx.ovly_buffer[i].paddr = 0;
+ }
+
+ return 0;
+}
+
+static enum mcde_ovly_pix_fmt get_ovly_fmt(enum compdev_fmt fmt)
+{
+ switch (fmt) {
+ default:
+ case COMPDEV_FMT_RGB565:
+ return MCDE_OVLYPIXFMT_RGB565;
+ case COMPDEV_FMT_RGB888:
+ return MCDE_OVLYPIXFMT_RGB888;
+ case COMPDEV_FMT_RGBA8888:
+ return MCDE_OVLYPIXFMT_RGBA8888;
+ case COMPDEV_FMT_RGBX8888:
+ return MCDE_OVLYPIXFMT_RGBX8888;
+ case COMPDEV_FMT_YUV422:
+ return MCDE_OVLYPIXFMT_YCbCr422;
+ }
+}
+
+static int compdev_setup_ovly(struct compdev_img *img,
+ struct compdev_buffer *buffer,
+ struct mcde_overlay *ovly,
+ int z_order,
+ struct dss_context *dss_ctx)
+{
+ int ret = 0;
+ enum hwmem_mem_type memtype;
+ enum hwmem_access access;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+ struct hwmem_region rgn = { .offset = 0, .count = 1, .start = 0 };
+ struct mcde_overlay_info info;
+
+ if (img->buf.type == COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET) {
+ buffer->type = COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET;
+ buffer->alloc = hwmem_resolve_by_name(img->buf.hwmem_buf_name);
+ if (IS_ERR(buffer->alloc)) {
+ ret = PTR_ERR(buffer->alloc);
+ dev_warn(dss_ctx->dev,
+ "HWMEM resolve failed, %d\n", ret);
+ goto resolve_failed;
+ }
+
+ hwmem_get_info(buffer->alloc, &buffer->size, &memtype,
+ &access);
+
+ if (!(access & HWMEM_ACCESS_READ) ||
+ memtype != HWMEM_MEM_CONTIGUOUS_SYS) {
+ ret = -EACCES;
+ dev_warn(dss_ctx->dev,
+ "Invalid_mem overlay, %d\n", ret);
+ goto invalid_mem;
+ }
+ ret = hwmem_pin(buffer->alloc, &mem_chunk, &mem_chunk_length);
+ if (ret) {
+ dev_warn(dss_ctx->dev,
+ "Pin failed, %d\n", ret);
+ goto pin_failed;
+ }
+
+ rgn.size = rgn.end = buffer->size;
+ ret = hwmem_set_domain(buffer->alloc, HWMEM_ACCESS_READ,
+ HWMEM_DOMAIN_SYNC, &rgn);
+ if (ret)
+ dev_warn(dss_ctx->dev,
+ "Set domain failed, %d\n", ret);
+
+ buffer->paddr = mem_chunk.paddr;
+ } else if (img->buf.type == COMPDEV_PTR_PHYSICAL) {
+ buffer->type = COMPDEV_PTR_PHYSICAL;
+ buffer->alloc = NULL;
+ buffer->size = img->buf.len;
+ buffer->paddr = img->buf.offset;
+	} else {
+		/* Unknown buffer type: fail rather than use stale state */
+		return -EINVAL;
+	}
+
+ info.stride = img->pitch;
+ info.fmt = get_ovly_fmt(img->fmt);
+ info.src_x = 0;
+ info.src_y = 0;
+ info.dst_x = img->dst_rect.x;
+ info.dst_y = img->dst_rect.y;
+ info.dst_z = z_order;
+ info.w = img->dst_rect.width;
+ info.h = img->dst_rect.height;
+ info.dirty.x = 0;
+ info.dirty.y = 0;
+ info.dirty.w = img->dst_rect.width;
+ info.dirty.h = img->dst_rect.height;
+ info.paddr = buffer->paddr;
+
+ mcde_dss_apply_overlay(ovly, &info);
+ return ret;
+
+pin_failed:
+invalid_mem:
+ buffer->alloc = NULL;
+ buffer->size = 0;
+ buffer->paddr = 0;
+
+resolve_failed:
+ return ret;
+}
+
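+/*
+ * Example (assumed mounting): with a panel mounted at 90 degrees and a
+ * buffer rotated 90 degrees, (90 + 90) % 180 == 0, so xres/yres are not
+ * swapped, and (90 + 90) % 360 == 180 is programmed as the MCDE rotation.
+ */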
+static int compdev_update_rotation(struct dss_context *dss_ctx,
+ enum compdev_rotation rotation)
+{
+ /* Set video mode */
+ struct mcde_video_mode vmode;
+ int ret = 0;
+
+ memset(&vmode, 0, sizeof(struct mcde_video_mode));
+ mcde_dss_get_video_mode(dss_ctx->ddev, &vmode);
+ if ((dss_ctx->display_rotation + rotation) % 180) {
+ vmode.xres = dss_ctx->phy_size.height;
+ vmode.yres = dss_ctx->phy_size.width;
+ } else {
+ vmode.xres = dss_ctx->phy_size.width;
+ vmode.yres = dss_ctx->phy_size.height;
+ }
+
+ /* Set rotation */
+ ret = mcde_dss_set_rotation(dss_ctx->ddev,
+ (dss_ctx->display_rotation + rotation) % 360);
+ if (ret != 0)
+ goto exit;
+
+ ret = mcde_dss_set_video_mode(dss_ctx->ddev, &vmode);
+ if (ret != 0)
+ goto exit;
+
+
+ /* Apply */
+ ret = mcde_dss_apply_channel(dss_ctx->ddev);
+exit:
+ return ret;
+}
+
+static int release_prev_frame(struct dss_context *dss_ctx)
+{
+ int ret = 0;
+ int i;
+
+ /* Handle unpin of previous buffers */
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++) {
+ if (dss_ctx->ovly_buffer[i].type ==
+ COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET &&
+ dss_ctx->ovly_buffer[i].paddr != 0) {
+ hwmem_unpin(dss_ctx->ovly_buffer[i].alloc);
+ hwmem_release(dss_ctx->ovly_buffer[i].alloc);
+ }
+ dss_ctx->ovly_buffer[i].alloc = NULL;
+ dss_ctx->ovly_buffer[i].size = 0;
+ dss_ctx->ovly_buffer[i].paddr = 0;
+ }
+ return ret;
+
+}
+
+static enum b2r2_blt_fmt compdev_to_blt_format(enum compdev_fmt fmt)
+{
+ switch (fmt) {
+ case COMPDEV_FMT_RGBA8888:
+ return B2R2_BLT_FMT_32_BIT_ABGR8888;
+ case COMPDEV_FMT_RGB888:
+ return B2R2_BLT_FMT_24_BIT_RGB888;
+ case COMPDEV_FMT_RGB565:
+ return B2R2_BLT_FMT_16_BIT_RGB565;
+ case COMPDEV_FMT_YUV422:
+ return B2R2_BLT_FMT_CB_Y_CR_Y;
+ case COMPDEV_FMT_YCBCR42XMBN:
+ return B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE;
+ case COMPDEV_FMT_YUV420_SP:
+ return B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR;
+ case COMPDEV_FMT_YVU420_SP:
+ return B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR;
+ case COMPDEV_FMT_YUV420_P:
+ return B2R2_BLT_FMT_YUV420_PACKED_PLANAR;
+ default:
+ return B2R2_BLT_FMT_UNUSED;
+ }
+}
+
+static enum b2r2_blt_transform to_blt_transform
+ (enum compdev_rotation compdev_rot)
+{
+ switch (compdev_rot) {
+ case COMPDEV_ROT_0:
+ return B2R2_BLT_TRANSFORM_NONE;
+ case COMPDEV_ROT_90_CCW:
+ return B2R2_BLT_TRANSFORM_CCW_ROT_90;
+ case COMPDEV_ROT_180:
+ return B2R2_BLT_TRANSFORM_CCW_ROT_180;
+ case COMPDEV_ROT_270_CCW:
+		return B2R2_BLT_TRANSFORM_CCW_ROT_270;
+ default:
+ return B2R2_BLT_TRANSFORM_NONE;
+ }
+}
+
+static u32 get_stride(u32 width, enum compdev_fmt fmt)
+{
+ u32 stride = 0;
+ switch (fmt) {
+ case COMPDEV_FMT_RGB565:
+ stride = width * 2;
+ break;
+ case COMPDEV_FMT_RGB888:
+ stride = width * 3;
+ break;
+ case COMPDEV_FMT_RGBX8888:
+ stride = width * 4;
+ break;
+ case COMPDEV_FMT_RGBA8888:
+ stride = width * 4;
+ break;
+ case COMPDEV_FMT_YUV422:
+ stride = width * 2;
+ break;
+ case COMPDEV_FMT_YCBCR42XMBN:
+ case COMPDEV_FMT_YUV420_SP:
+ case COMPDEV_FMT_YVU420_SP:
+ case COMPDEV_FMT_YUV420_P:
+ stride = width;
+ break;
+ }
+
+ /* The display controller requires 8 byte aligned strides */
+ if (stride % 8)
+ stride += 8 - (stride % 8);
+
+ return stride;
+}
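+
+/*
+ * Example: an RGB565 surface 727 pixels wide gives 727 * 2 = 1454 bytes,
+ * which is rounded up to 1456 to satisfy the 8-byte alignment rule above.
+ */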
+
+static int alloc_comp_internal_img(enum compdev_fmt fmt,
+ u16 width, u16 height, struct compdev_img_internal **img_pp)
+{
+ struct hwmem_alloc *alloc;
+ int name;
+ u32 size;
+ u32 stride;
+ struct compdev_img_internal *img;
+
+ stride = get_stride(width, fmt);
+ size = stride * height;
+ size = PAGE_ALIGN(size);
+
+ img = kzalloc(sizeof(struct compdev_img_internal), GFP_KERNEL);
+
+ if (!img)
+ return -ENOMEM;
+
+ alloc = hwmem_alloc(size, HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED,
+ (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE |
+ HWMEM_ACCESS_IMPORT),
+ HWMEM_MEM_CONTIGUOUS_SYS);
+
+ if (IS_ERR(alloc)) {
+ kfree(img);
+ img = NULL;
+ return PTR_ERR(alloc);
+ }
+
+ name = hwmem_get_name(alloc);
+ if (name < 0) {
+ kfree(img);
+ img = NULL;
+ hwmem_release(alloc);
+ return name;
+ }
+
+ img->img.height = height;
+ img->img.width = width;
+ img->img.fmt = fmt;
+ img->img.pitch = stride;
+ img->img.buf.hwmem_buf_name = name;
+ img->img.buf.type = COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET;
+ img->img.buf.offset = 0;
+ img->img.buf.len = size;
+
+ img->ref_count = 1;
+
+ *img_pp = img;
+
+ return 0;
+}
+
+static void free_comp_img_buf(struct compdev_img_internal *img,
+ struct device *dev)
+{
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (img != NULL && img->ref_count) {
+ img->ref_count--;
+ if (img->ref_count == 0) {
+ struct hwmem_alloc *alloc;
+ if (img->img.buf.hwmem_buf_name > 0) {
+ alloc = hwmem_resolve_by_name(
+ img->img.buf.hwmem_buf_name);
+ if (IS_ERR(alloc)) {
+ dev_err(dev, "%s: Error getting Alloc "
+ "from HWMEM\n", __func__);
+ return;
+ }
+				/*
+				 * Two releases: one for the reference taken
+				 * by hwmem_resolve_by_name() above, one for
+				 * the allocation's original reference.
+				 */
+ hwmem_release(alloc);
+ hwmem_release(alloc);
+ }
+ kfree(img);
+ }
+ }
+}
+
+struct compdev_img_internal *compdev_buffer_cache_get_image(
+ struct buffer_cache_context *cache_ctx, enum compdev_fmt fmt,
+ u16 width, u16 height)
+{
+ int i;
+ struct compdev_img_internal *img = NULL;
+
+ dev_dbg(cache_ctx->dev, "%s\n", __func__);
+
+ /* First check for a cache hit */
+ if (cache_ctx->unused_counter > 0) {
+ u8 active_index = cache_ctx->index;
+ struct compdev_img_internal *temp =
+ cache_ctx->img[active_index];
+ if (temp != NULL && temp->img.fmt == fmt &&
+ temp->img.width == width &&
+ temp->img.height == height) {
+ img = temp;
+ cache_ctx->unused_counter = 0;
+ }
+ }
+ /* Check if there was a cache hit */
+ if (img == NULL) {
+ /* Create new buffers and release old */
+ for (i = 0; i < BUFFER_CACHE_DEPTH; i++) {
+ if (cache_ctx->img[i]) {
+ free_comp_img_buf(cache_ctx->img[i],
+ cache_ctx->dev);
+ cache_ctx->img[i] = NULL;
+ }
+ cache_ctx->index = 0;
+ if (alloc_comp_internal_img(fmt, width, height,
+ &cache_ctx->img[i]))
+ dev_err(cache_ctx->dev,
+ "%s: Allocation error\n",
+ __func__);
+ }
+ img = cache_ctx->img[0];
+ }
+
+ if (img != NULL) {
+ img->ref_count++;
+ cache_ctx->unused_counter = 0;
+ cache_ctx->index++;
+ if (cache_ctx->index >= BUFFER_CACHE_DEPTH)
+ cache_ctx->index = 0;
+ }
+
+ return img;
+}
+
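+/*
+ * Frame-based aging of the temp-buffer cache: unused_counter is reset
+ * whenever a cached buffer is handed out and incremented once per frame
+ * here, so after two consecutive frames without a cache user the
+ * buffers are freed again.
+ */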
+static void compdev_buffer_cache_mark_frame
+ (struct buffer_cache_context *cache_ctx)
+{
+ if (cache_ctx->unused_counter < 2)
+ cache_ctx->unused_counter++;
+ if (cache_ctx->unused_counter == 2) {
+ int i;
+ for (i = 0; i < BUFFER_CACHE_DEPTH; i++) {
+ if (cache_ctx->img[i]) {
+ free_comp_img_buf(cache_ctx->img[i],
+ cache_ctx->dev);
+ cache_ctx->img[i] = NULL;
+ }
+ }
+ }
+}
+
+static bool check_hw_format(enum compdev_fmt fmt)
+{
+ if (fmt == COMPDEV_FMT_RGB565 ||
+ fmt == COMPDEV_FMT_RGB888 ||
+ fmt == COMPDEV_FMT_RGBA8888 ||
+ fmt == COMPDEV_FMT_RGBX8888 ||
+ fmt == COMPDEV_FMT_YUV422)
+ return true;
+ else
+ return false;
+}
+
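+/*
+ * Pick a B2R2 destination format that the MCDE overlays can scan out
+ * (see check_hw_format() above). YUV sources stay YUV422 when only
+ * scaling/conversion is needed; when B2R2 also performs the rotation,
+ * an RGB888 destination is chosen instead (presumably because rotated
+ * writes to YUV422 are not supported by the blitter).
+ */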
+static enum compdev_fmt find_compatible_fmt(enum compdev_fmt fmt, bool rotation)
+{
+ if (!rotation) {
+ switch (fmt) {
+ case COMPDEV_FMT_RGB565:
+ case COMPDEV_FMT_RGB888:
+ case COMPDEV_FMT_RGBA8888:
+ case COMPDEV_FMT_RGBX8888:
+ return fmt;
+ case COMPDEV_FMT_YUV422:
+ case COMPDEV_FMT_YCBCR42XMBN:
+ case COMPDEV_FMT_YUV420_SP:
+ case COMPDEV_FMT_YVU420_SP:
+ case COMPDEV_FMT_YUV420_P:
+ return COMPDEV_FMT_YUV422;
+ default:
+ return COMPDEV_FMT_RGBA8888;
+ }
+ } else {
+ switch (fmt) {
+ case COMPDEV_FMT_RGB565:
+ case COMPDEV_FMT_RGB888:
+ case COMPDEV_FMT_RGBA8888:
+ case COMPDEV_FMT_RGBX8888:
+ return fmt;
+ case COMPDEV_FMT_YUV422:
+ case COMPDEV_FMT_YCBCR42XMBN:
+ case COMPDEV_FMT_YUV420_SP:
+ case COMPDEV_FMT_YVU420_SP:
+ case COMPDEV_FMT_YUV420_P:
+ return COMPDEV_FMT_RGB888;
+ default:
+ return COMPDEV_FMT_RGBA8888;
+ }
+ }
+}
+
+static void compdev_callback_worker_function(struct work_struct *work)
+{
+ struct compdev_post_callback_work *cb_work =
+ (struct compdev_post_callback_work *)work;
+
+ if (cb_work->pb_cb != NULL)
+ cb_work->pb_cb(cb_work->cb_data, cb_work->img);
+}
+static void compdev_blt_worker_function(struct work_struct *work)
+{
+ struct compdev_blt_work *blt_work = (struct compdev_blt_work *)work;
+ struct compdev_img *src_img;
+ struct compdev_img *dst_img;
+ struct b2r2_blt_req req;
+ int req_id;
+
+ dev_dbg(blt_work->dev, "%s\n", __func__);
+
+ src_img = blt_work->src_img;
+ dst_img = &blt_work->dst_img->img;
+
+ memset(&req, 0, sizeof(req));
+ req.size = sizeof(req);
+
+ if (src_img->buf.type == COMPDEV_PTR_PHYSICAL) {
+ req.src_img.buf.type = B2R2_BLT_PTR_PHYSICAL;
+ req.src_img.buf.fd = src_img->buf.fd;
+ } else {
+ struct hwmem_alloc *alloc;
+
+ req.src_img.buf.type = B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET;
+ req.src_img.buf.hwmem_buf_name = src_img->buf.hwmem_buf_name;
+
+		alloc = hwmem_resolve_by_name(src_img->buf.hwmem_buf_name);
+		if (IS_ERR(alloc)) {
+			dev_warn(blt_work->dev,
+				"HWMEM resolve failed\n");
+		} else {
+			hwmem_set_access(alloc,
+				HWMEM_ACCESS_READ | HWMEM_ACCESS_IMPORT,
+				task_tgid_nr(current));
+			hwmem_release(alloc);
+		}
+ }
+ req.src_img.pitch = src_img->pitch;
+ req.src_img.buf.offset = src_img->buf.offset;
+ req.src_img.buf.len = src_img->buf.len;
+ req.src_img.fmt = compdev_to_blt_format(src_img->fmt);
+ req.src_img.width = src_img->width;
+ req.src_img.height = src_img->height;
+
+ req.src_rect.x = src_img->src_rect.x;
+ req.src_rect.y = src_img->src_rect.y;
+ req.src_rect.width = src_img->src_rect.width;
+ req.src_rect.height = src_img->src_rect.height;
+
+ if (dst_img->buf.type == COMPDEV_PTR_PHYSICAL) {
+ req.dst_img.buf.type = B2R2_BLT_PTR_PHYSICAL;
+ req.dst_img.buf.fd = dst_img->buf.fd;
+ } else {
+ req.dst_img.buf.type = B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET;
+ req.dst_img.buf.hwmem_buf_name = dst_img->buf.hwmem_buf_name;
+ }
+ req.dst_img.pitch = dst_img->pitch;
+ req.dst_img.buf.offset = dst_img->buf.offset;
+ req.dst_img.buf.len = dst_img->buf.len;
+ req.dst_img.fmt = compdev_to_blt_format(dst_img->fmt);
+ req.dst_img.width = dst_img->width;
+ req.dst_img.height = dst_img->height;
+
+ if (blt_work->mcde_rotation)
+ req.transform = B2R2_BLT_TRANSFORM_NONE;
+ else
+ req.transform = to_blt_transform(src_img->rotation);
+ req.dst_rect.x = 0;
+ req.dst_rect.y = 0;
+ req.dst_rect.width = src_img->dst_rect.width;
+ req.dst_rect.height = src_img->dst_rect.height;
+
+ req.global_alpha = 0xff;
+ req.flags = B2R2_BLT_FLAG_DITHER;
+
+ req_id = b2r2_blt_request(blt_work->blt_handle, &req);
+
+ if (b2r2_blt_synch(blt_work->blt_handle, req_id) < 0) {
+ dev_err(blt_work->dev,
+ "%s: Could not perform b2r2_blt_synch",
+ __func__);
+ }
+
+ dst_img->src_rect.x = 0;
+	dst_img->src_rect.y = 0;
+ dst_img->src_rect.width = dst_img->width;
+ dst_img->src_rect.height = dst_img->height;
+
+ dst_img->dst_rect.x = src_img->dst_rect.x;
+ dst_img->dst_rect.y = src_img->dst_rect.y;
+ dst_img->dst_rect.width = src_img->dst_rect.width;
+ dst_img->dst_rect.height = src_img->dst_rect.height;
+
+ dst_img->rotation = src_img->rotation;
+}
+
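+/*
+ * Scene synchronization: post_scene_info sets sync_count to the number
+ * of images in the scene. Each posted buffer decrements it; all but the
+ * last poster block on cd->fence (with cd->lock dropped), and the last
+ * one pushes both images to MCDE and completes the fence.
+ */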
+static int compdev_post_buffer_locked(struct compdev *cd,
+ struct compdev_img *src_img)
+{
+ int ret = 0;
+ int i;
+ bool transform_needed = false;
+ struct compdev_img *resulting_img;
+ struct compdev_blt_work blt_work;
+ struct compdev_post_callback_work cb_work;
+ bool callback_work = false;
+ bool bypass_case = false;
+
+ dev_dbg(cd->dev, "%s\n", __func__);
+
+ /* Free potential temp buffers */
+ for (i = 0; i < cd->dss_ctx.temp_img_count; i++)
+ free_comp_img_buf(cd->dss_ctx.temp_img[i], cd->dev);
+ cd->dss_ctx.temp_img_count = 0;
+
+ /* Check for bypass images */
+ if (src_img->flags & COMPDEV_BYPASS_FLAG)
+ bypass_case = true;
+
+ /* Handle callback */
+ if (cd->pb_cb != NULL) {
+ callback_work = true;
+ INIT_WORK((struct work_struct *)&cb_work,
+ compdev_callback_worker_function);
+ cb_work.img = src_img;
+ cb_work.pb_cb = cd->pb_cb;
+ cb_work.cb_data = cd->cb_data;
+ cb_work.dev = cd->dev;
+ queue_work(cd->worker_thread, (struct work_struct *)&cb_work);
+ }
+
+ if (!bypass_case) {
+ /* Determine if transform is needed */
+ /* First check scaling */
+ if ((src_img->rotation == COMPDEV_ROT_0 ||
+ src_img->rotation == COMPDEV_ROT_180) &&
+ (src_img->src_rect.width != src_img->dst_rect.width ||
+ src_img->src_rect.height != src_img->dst_rect.height))
+ transform_needed = true;
+ else if ((src_img->rotation == COMPDEV_ROT_90_CCW ||
+ src_img->rotation == COMPDEV_ROT_270_CCW) &&
+ (src_img->src_rect.width != src_img->dst_rect.height ||
+ src_img->src_rect.height != src_img->dst_rect.width))
+ transform_needed = true;
+
+		if (!transform_needed && !check_hw_format(src_img->fmt))
+ transform_needed = true;
+
+ if (transform_needed) {
+ u16 width = 0;
+ u16 height = 0;
+ enum compdev_fmt fmt;
+
+ INIT_WORK((struct work_struct *)&blt_work,
+ compdev_blt_worker_function);
+
+ if (cd->dss_ctx.blt_handle == 0) {
+ dev_dbg(cd->dev, "%s: B2R2 opened\n", __func__);
+ cd->dss_ctx.blt_handle = b2r2_blt_open();
+ if (cd->dss_ctx.blt_handle < 0) {
+ dev_warn(cd->dev,
+ "%s(%d): Failed to "
+ "open b2r2 device\n",
+ __func__, __LINE__);
+ }
+ }
+ blt_work.blt_handle = cd->dss_ctx.blt_handle;
+ blt_work.src_img = src_img;
+ blt_work.mcde_rotation = cd->mcde_rotation;
+
+ width = src_img->dst_rect.width;
+ height = src_img->dst_rect.height;
+
+ fmt = find_compatible_fmt(src_img->fmt,
+ (!cd->mcde_rotation) &&
+ (src_img->rotation != COMPDEV_ROT_0));
+
+ blt_work.dst_img = compdev_buffer_cache_get_image
+ (&cd->dss_ctx.cache_ctx,
+ fmt, width, height);
+
+ blt_work.dst_img->img.flags = src_img->flags;
+ blt_work.dev = cd->dev;
+
+ queue_work(cd->worker_thread,
+ (struct work_struct *)&blt_work);
+ flush_work_sync((struct work_struct *)&blt_work);
+
+ resulting_img = &blt_work.dst_img->img;
+
+ cd->dss_ctx.temp_img[cd->dss_ctx.temp_img_count] =
+ blt_work.dst_img;
+ cd->dss_ctx.temp_img_count++;
+
+ } else {
+ resulting_img = src_img;
+ }
+
+ if (!cd->mcde_rotation)
+ resulting_img->rotation = COMPDEV_ROT_0;
+
+ cd->images[cd->image_count] = resulting_img;
+ cd->image_count++;
+
+ /* make sure that a potential callback has returned */
+ if (callback_work)
+ flush_work_sync((struct work_struct *)&cb_work);
+
+ if (cd->sync_count > 1) {
+ cd->sync_count--;
+ mutex_unlock(&cd->lock);
+ /* Wait for fence */
+ wait_for_completion(&cd->fence);
+ mutex_lock(&cd->lock);
+ } else {
+ struct compdev_img *img1 = NULL;
+ struct compdev_img *img2 = NULL;
+
+ if (cd->sync_count)
+ cd->sync_count--;
+
+ img1 = cd->images[0];
+ if (cd->image_count)
+ img2 = cd->images[1];
+
+ /* Do the refresh */
+ compdev_post_buffers_dss(&cd->dss_ctx, img1, img2);
+ compdev_buffer_cache_mark_frame
+ (&cd->dss_ctx.cache_ctx);
+
+ if (cd->s_info.img_count > 1) {
+ /* Releasing fence */
+ complete(&cd->fence);
+ }
+
+ cd->sync_count = 0;
+ cd->image_count = 0;
+ cd->images[0] = NULL;
+ cd->images[1] = NULL;
+ }
+ } else {
+ /* make sure that a potential callback has returned */
+ if (callback_work)
+ flush_work_sync((struct work_struct *)&cb_work);
+ }
+
+ return ret;
+}
+
+static int compdev_post_buffers_dss(struct dss_context *dss_ctx,
+ struct compdev_img *img1, struct compdev_img *img2)
+{
+ int ret = 0;
+ int i = 0;
+
+ struct compdev_img *fb_img = NULL;
+ struct compdev_img *ovly_img = NULL;
+
+ /* Unpin the previous frame */
+ release_prev_frame(dss_ctx);
+
+ /* Set channel rotation */
+	if (img1 != NULL &&
+			(dss_ctx->current_buffer_rotation != img1->rotation)) {
+		ret = compdev_update_rotation(dss_ctx, img1->rotation);
+		if (ret != 0)
+			dev_warn(dss_ctx->dev,
+				"Failed to update MCDE rotation "
+				"(img1->rotation = %d), %d\n",
+				img1->rotation, ret);
+		else
+			dss_ctx->current_buffer_rotation = img1->rotation;
+ }
+
+ if ((img1 != NULL) && (img1->flags & COMPDEV_OVERLAY_FLAG))
+ ovly_img = img1;
+ else if (img1 != NULL)
+ fb_img = img1;
+
+
+ if ((img2 != NULL) && (img2->flags & COMPDEV_OVERLAY_FLAG))
+ ovly_img = img2;
+ else if (img2 != NULL)
+ fb_img = img2;
+
+ /* Handle buffers */
+ if (fb_img != NULL) {
+ ret = compdev_setup_ovly(fb_img,
+ &dss_ctx->ovly_buffer[i], dss_ctx->ovly[0], 1, dss_ctx);
+ if (ret)
+ dev_warn(dss_ctx->dev,
+ "Failed to setup overlay[%d], %d\n", 0, ret);
+ i++;
+ } else {
+ disable_overlay(dss_ctx->ovly[0]);
+ }
+
+
+ if (ovly_img != NULL) {
+ ret = compdev_setup_ovly(ovly_img,
+ &dss_ctx->ovly_buffer[i], dss_ctx->ovly[1], 0, dss_ctx);
+ if (ret)
+ dev_warn(dss_ctx->dev,
+ "Failed to setup overlay[%d], %d\n", 1, ret);
+ } else {
+ disable_overlay(dss_ctx->ovly[1]);
+ }
+
+ /* Do the display update */
+ mcde_dss_update_overlay(dss_ctx->ovly[0], true);
+
+ return ret;
+}
+
+static int compdev_post_scene_info_locked(struct compdev *cd,
+ struct compdev_scene_info *s_info)
+{
+ int ret = 0;
+
+ dev_dbg(cd->dev, "%s\n", __func__);
+
+ cd->s_info = *s_info;
+ cd->sync_count = cd->s_info.img_count;
+
+ /* always complete the fence in case someone is hanging incorrectly. */
+ complete(&cd->fence);
+ init_completion(&cd->fence);
+
+ /* Handle callback */
+ if (cd->si_cb != NULL) {
+ mutex_unlock(&cd->lock);
+ cd->si_cb(cd->cb_data, s_info);
+ mutex_lock(&cd->lock);
+ }
+ return ret;
+}
+
+
+static int compdev_get_size_locked(struct dss_context *dss_ctx,
+ struct compdev_size *size)
+{
+ int ret = 0;
+ if ((dss_ctx->display_rotation) % 180) {
+ size->height = dss_ctx->phy_size.width;
+ size->width = dss_ctx->phy_size.height;
+ } else {
+ size->height = dss_ctx->phy_size.height;
+ size->width = dss_ctx->phy_size.width;
+ }
+
+ return ret;
+}
+
+static int compdev_get_listener_state_locked(struct compdev *cd,
+ enum compdev_listener_state *state)
+{
+ int ret = 0;
+
+ *state = COMPDEV_LISTENER_OFF;
+ if (cd->pb_cb != NULL)
+ *state = COMPDEV_LISTENER_ON;
+ return ret;
+}
+
+static long compdev_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct compdev *cd = (struct compdev *)file->private_data;
+ struct compdev_img img;
+ struct compdev_scene_info s_info;
+
+ mutex_lock(&cd->lock);
+
+ switch (cmd) {
+ case COMPDEV_GET_SIZE_IOC:
+ {
+ struct compdev_size tmp;
+ compdev_get_size_locked(&cd->dss_ctx, &tmp);
+ ret = copy_to_user((void __user *)arg, &tmp,
+ sizeof(tmp));
+ if (ret)
+ ret = -EFAULT;
+ }
+ break;
+ case COMPDEV_GET_LISTENER_STATE_IOC:
+ {
+ enum compdev_listener_state state;
+ compdev_get_listener_state_locked(cd, &state);
+ ret = copy_to_user((void __user *)arg, &state,
+ sizeof(state));
+ if (ret)
+ ret = -EFAULT;
+ }
+ break;
+ case COMPDEV_POST_BUFFER_IOC:
+ memset(&img, 0, sizeof(img));
+ /* Get the user data */
+ if (copy_from_user(&img, (void *)arg, sizeof(img))) {
+ dev_warn(cd->dev,
+ "%s: copy_from_user failed\n",
+ __func__);
+ mutex_unlock(&cd->lock);
+ return -EFAULT;
+ }
+ ret = compdev_post_buffer_locked(cd, &img);
+
+ break;
+ case COMPDEV_POST_SCENE_INFO_IOC:
+ memset(&s_info, 0, sizeof(s_info));
+ /* Get the user data */
+ if (copy_from_user(&s_info, (void *)arg, sizeof(s_info))) {
+ dev_warn(cd->dev,
+ "%s: copy_from_user failed\n",
+ __func__);
+ mutex_unlock(&cd->lock);
+ return -EFAULT;
+ }
+ ret = compdev_post_scene_info_locked(cd, &s_info);
+
+ break;
+
+ default:
+ ret = -ENOSYS;
+ }
+
+ mutex_unlock(&cd->lock);
+
+ return ret;
+}
+
+static const struct file_operations compdev_fops = {
+ .open = compdev_open,
+ .release = compdev_release,
+ .unlocked_ioctl = compdev_ioctl,
+};
+
+static void init_compdev(struct compdev *cd, const char *name)
+{
+ mutex_init(&cd->lock);
+ INIT_LIST_HEAD(&cd->list);
+ init_completion(&cd->fence);
+
+ cd->mdev.minor = MISC_DYNAMIC_MINOR;
+ cd->mdev.name = name;
+ cd->mdev.fops = &compdev_fops;
+ cd->dev = cd->mdev.this_device;
+}
+
+static void init_dss_context(struct dss_context *dss_ctx,
+ struct mcde_display_device *ddev, struct compdev *cd)
+{
+ dss_ctx->ddev = ddev;
+ dss_ctx->dev = cd->dev;
+ memset(&dss_ctx->cache_ctx, 0, sizeof(struct buffer_cache_context));
+ dss_ctx->cache_ctx.dev = dss_ctx->dev;
+}
+
+int compdev_create(struct mcde_display_device *ddev,
+ struct mcde_overlay *parent_ovly, bool mcde_rotation)
+{
+ int ret = 0;
+ int i;
+ struct compdev *cd;
+ struct mcde_video_mode vmode;
+ struct mcde_overlay_info info;
+
+ if (dev_counter == 0) {
+ for (i = 0; i < MAX_NBR_OF_COMPDEVS; i++)
+ compdevs[i] = NULL;
+ }
+
+	if (dev_counter >= MAX_NBR_OF_COMPDEVS)
+ return -ENOMEM;
+
+ cd = kzalloc(sizeof(struct compdev), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ compdevs[dev_counter] = cd;
+ cd->dev_index = dev_counter;
+
+	/* The name must outlive misc_register(); keep it in the struct */
+	snprintf(cd->name, sizeof(cd->name), "%s%d",
+			COMPDEV_DEFAULT_DEVICE_PREFIX, dev_counter++);
+	init_compdev(cd, cd->name);
+
+ init_dss_context(&cd->dss_ctx, ddev, cd);
+
+ mcde_dss_get_video_mode(ddev, &vmode);
+
+	cd->worker_thread = create_workqueue(cd->name);
+ if (!cd->worker_thread) {
+ ret = -ENOMEM;
+ goto fail_workqueue;
+ }
+
+ cd->dss_ctx.ovly[0] = parent_ovly;
+ if (!cd->dss_ctx.ovly[0]) {
+ ret = -ENOMEM;
+ goto fail_create_ovly;
+ }
+
+ for (i = 1; i < NUM_COMPDEV_BUFS; i++) {
+ cd->dss_ctx.ovly[i] = mcde_dss_create_overlay(ddev, &info);
+ if (!cd->dss_ctx.ovly[i]) {
+ ret = -ENOMEM;
+ goto fail_create_ovly;
+ }
+ if (mcde_dss_enable_overlay(cd->dss_ctx.ovly[i]))
+ goto fail_create_ovly;
+ if (disable_overlay(cd->dss_ctx.ovly[i]))
+ goto fail_create_ovly;
+ }
+
+ mcde_dss_get_native_resolution(ddev, &cd->dss_ctx.phy_size.width,
+ &cd->dss_ctx.phy_size.height);
+ cd->dss_ctx.display_rotation = mcde_dss_get_rotation(ddev);
+ cd->dss_ctx.current_buffer_rotation = 0;
+
+ cd->mcde_rotation = mcde_rotation;
+
+ ret = misc_register(&cd->mdev);
+ if (ret)
+ goto fail_register_misc;
+ mutex_lock(&dev_list_lock);
+ list_add_tail(&cd->list, &dev_list);
+ mutex_unlock(&dev_list_lock);
+
+ goto out;
+
+fail_register_misc:
+fail_create_ovly:
+	/* ovly[0] is the caller's overlay; destroy only the ones we created */
+	for (i = 1; i < NUM_COMPDEV_BUFS; i++) {
+ if (cd->dss_ctx.ovly[i])
+ mcde_dss_destroy_overlay(cd->dss_ctx.ovly[i]);
+ }
+fail_workqueue:
+ kfree(cd);
+out:
+ return ret;
+}
+
+
+int compdev_get(int dev_idx, struct compdev **cd_pp)
+{
+ struct compdev *cd;
+ cd = NULL;
+
+	if (dev_idx < 0 || dev_idx >= MAX_NBR_OF_COMPDEVS)
+ return -ENOMEM;
+
+ cd = compdevs[dev_idx];
+ if (cd != NULL) {
+ mutex_lock(&cd->lock);
+ cd->ref_count++;
+ mutex_unlock(&cd->lock);
+ *cd_pp = cd;
+ return 0;
+ } else {
+ return -ENOMEM;
+ }
+}
+EXPORT_SYMBOL(compdev_get);
+
+int compdev_put(struct compdev *cd)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&cd->lock);
+ cd->ref_count--;
+ if (cd->ref_count < 0)
+ dev_warn(cd->dev,
+ "%s: Incorrect ref count\n", __func__);
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_put);
+
+int compdev_get_size(struct compdev *cd, struct compdev_size *size)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&cd->lock);
+
+ ret = compdev_get_size_locked(&cd->dss_ctx, size);
+
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_get_size);
+
+int compdev_get_listener_state(struct compdev *cd,
+ enum compdev_listener_state *listener_state)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&cd->lock);
+
+ ret = compdev_get_listener_state_locked(cd, listener_state);
+
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_get_listener_state);
+
+
+int compdev_post_buffer(struct compdev *cd, struct compdev_img *img)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&cd->lock);
+
+ ret = compdev_post_buffer_locked(cd, img);
+
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_post_buffer);
+
+int compdev_post_scene_info(struct compdev *cd,
+ struct compdev_scene_info *s_info)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&cd->lock);
+
+ ret = compdev_post_scene_info_locked(cd, s_info);
+
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_post_scene_info);
+
+int compdev_register_listener_callbacks(struct compdev *cd, void *data,
+ post_buffer_callback pb_cb, post_scene_info_callback si_cb)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+ mutex_lock(&cd->lock);
+ cd->cb_data = data;
+ cd->pb_cb = pb_cb;
+ cd->si_cb = si_cb;
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_register_listener_callbacks);
+
+int compdev_deregister_callbacks(struct compdev *cd)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+ mutex_lock(&cd->lock);
+ cd->cb_data = NULL;
+ cd->pb_cb = NULL;
+ cd->si_cb = NULL;
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_deregister_callbacks);
+
+void compdev_destroy(struct mcde_display_device *ddev)
+{
+ struct compdev *cd;
+ struct compdev *tmp;
+ int i;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(cd, tmp, &dev_list, list) {
+ if (cd->dss_ctx.ddev == ddev) {
+ list_del(&cd->list);
+ misc_deregister(&cd->mdev);
+ for (i = 1; i < NUM_COMPDEV_BUFS; i++)
+ mcde_dss_destroy_overlay(cd->dss_ctx.ovly[i]);
+ b2r2_blt_close(cd->dss_ctx.blt_handle);
+
+ release_prev_frame(&cd->dss_ctx);
+
+ /* Free potential temp buffers */
+ for (i = 0; i < cd->dss_ctx.temp_img_count; i++)
+ free_comp_img_buf(cd->dss_ctx.temp_img[i],
+ cd->dev);
+
+ for (i = 0; i < BUFFER_CACHE_DEPTH; i++) {
+ if (cd->dss_ctx.cache_ctx.img[i]) {
+ free_comp_img_buf
+ (cd->dss_ctx.cache_ctx.img[i],
+ cd->dev);
+ cd->dss_ctx.cache_ctx.img[i] = NULL;
+ }
+ }
+
+			destroy_workqueue(cd->worker_thread);
+			kfree(cd);
+			dev_counter--;
+			break;
+		}
+	}
+ mutex_unlock(&dev_list_lock);
+}
+
+static void compdev_destroy_all(void)
+{
+ struct compdev *cd;
+ struct compdev *tmp;
+ int i;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(cd, tmp, &dev_list, list) {
+ list_del(&cd->list);
+ misc_deregister(&cd->mdev);
+		for (i = 1; i < NUM_COMPDEV_BUFS; i++)
+ mcde_dss_destroy_overlay(cd->dss_ctx.ovly[i]);
+
+ release_prev_frame(&cd->dss_ctx);
+ /* Free potential temp buffers */
+ for (i = 0; i < cd->dss_ctx.temp_img_count; i++)
+ free_comp_img_buf(cd->dss_ctx.temp_img[i], cd->dev);
+
+ for (i = 0; i < BUFFER_CACHE_DEPTH; i++) {
+ if (cd->dss_ctx.cache_ctx.img[i]) {
+ free_comp_img_buf
+ (cd->dss_ctx.cache_ctx.img[i],
+ cd->dev);
+ cd->dss_ctx.cache_ctx.img[i] = NULL;
+ }
+ }
+
+ kfree(cd);
+ }
+ mutex_unlock(&dev_list_lock);
+
+ mutex_destroy(&dev_list_lock);
+}
+
+static int __init compdev_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ mutex_init(&dev_list_lock);
+
+ return 0;
+}
+module_init(compdev_init);
+
+static void __exit compdev_exit(void)
+{
+ compdev_destroy_all();
+ pr_info("%s\n", __func__);
+}
+module_exit(compdev_exit);
+
+MODULE_AUTHOR("Anders Bauer <anders.bauer@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Display overlay device driver");
+
diff --git a/drivers/misc/dispdev/Makefile b/drivers/misc/dispdev/Makefile
new file mode 100644
index 00000000000..11dc7611d26
--- /dev/null
+++ b/drivers/misc/dispdev/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DISPDEV) += dispdev.o
diff --git a/drivers/misc/dispdev/dispdev.c b/drivers/misc/dispdev/dispdev.c
new file mode 100644
index 00000000000..5413a252d35
--- /dev/null
+++ b/drivers/misc/dispdev/dispdev.c
@@ -0,0 +1,659 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Display output device driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/ioctl.h>
+
+#include <linux/dispdev.h>
+#include <linux/hwmem.h>
+#include <video/mcde_dss.h>
+
+#define DENSITY_CHECK (16)
+#define MAX_BUFFERS 4
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_list_lock);
+
+enum buffer_state {
+ BUF_UNUSED = 0,
+ BUF_QUEUED,
+ BUF_ACTIVATED,
+/*TODO:waitfordone BUF_DEACTIVATED,*/
+ BUF_FREE,
+ BUF_DEQUEUED,
+};
+
+struct dispdev_buffer {
+ struct hwmem_alloc *alloc;
+ u32 size;
+ enum buffer_state state;
+ u32 paddr; /* if pinned */
+};
+
+struct dispdev {
+ bool open;
+ struct mutex lock;
+ struct miscdevice mdev;
+ struct list_head list;
+ struct mcde_display_device *ddev;
+ struct mcde_overlay *ovly;
+ struct mcde_overlay *parent_ovly;
+ struct dispdev_config config;
+ bool overlay;
+ struct dispdev_buffer buffers[MAX_BUFFERS];
+ wait_queue_head_t waitq_dq;
+	/*
+	 * For the rotation use case,
+	 * buffers_need_update is used to ensure that a set_config that
+	 * changes width or height is followed by an unregister_buffer.
+	 */
+	bool buffers_need_update;
+	/*
+	 * For the overlay startup use case.
+	 * first_update is used to handle the first update after a set_config.
+	 * In this case a queue_buffer will arrive after set_config, not an
+	 * unregister_buffer as in the rotation use case.
+	 */
+ bool first_update;
+ char name[sizeof(DISPDEV_DEFAULT_DEVICE_PREFIX) + 3];
+};
+
+static int find_buf(struct dispdev *dd, enum buffer_state state)
+{
+ int i;
+ for (i = 0; i < MAX_BUFFERS; i++)
+ if (dd->buffers[i].state == state)
+ return i;
+ return -1;
+}
+
+int dispdev_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct dispdev *dd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(dd, &dev_list, list)
+ if (dd->mdev.minor == iminor(inode))
+ break;
+
+ if (&dd->list == &dev_list) {
+ mutex_unlock(&dev_list_lock);
+ return -ENODEV;
+ }
+
+ if (dd->open) {
+ mutex_unlock(&dev_list_lock);
+ return -EBUSY;
+ }
+
+ dd->open = true;
+
+ mutex_unlock(&dev_list_lock);
+
+ ret = mcde_dss_enable_overlay(dd->ovly);
+ if (ret)
+ return ret;
+
+ file->private_data = dd;
+
+ return 0;
+}
+
+int dispdev_release(struct inode *inode, struct file *file)
+{
+ int i;
+ struct dispdev *dd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(dd, &dev_list, list)
+ if (dd->mdev.minor == iminor(inode))
+ break;
+ mutex_unlock(&dev_list_lock);
+
+ if (&dd->list == &dev_list)
+ return -ENODEV;
+
+ /* TODO: Make sure it waits for completion */
+ mcde_dss_disable_overlay(dd->ovly);
+ for (i = 0; i < MAX_BUFFERS; i++) {
+ if (dd->buffers[i].paddr)
+ hwmem_unpin(dd->buffers[i].alloc);
+ if (dd->buffers[i].alloc)
+ hwmem_release(dd->buffers[i].alloc);
+ dd->buffers[i].alloc = NULL;
+ dd->buffers[i].state = BUF_UNUSED;
+ dd->buffers[i].size = 0;
+ dd->buffers[i].paddr = 0;
+ }
+ dd->open = false;
+ wake_up(&dd->waitq_dq);
+ return 0;
+}
+
+static enum mcde_ovly_pix_fmt get_ovly_fmt(enum dispdev_fmt fmt)
+{
+ switch (fmt) {
+ default:
+ case DISPDEV_FMT_RGB565:
+ return MCDE_OVLYPIXFMT_RGB565;
+ case DISPDEV_FMT_RGB888:
+ return MCDE_OVLYPIXFMT_RGB888;
+ case DISPDEV_FMT_RGBA8888:
+ return MCDE_OVLYPIXFMT_RGBA8888;
+ case DISPDEV_FMT_RGBX8888:
+ return MCDE_OVLYPIXFMT_RGBX8888;
+ case DISPDEV_FMT_YUV422:
+ return MCDE_OVLYPIXFMT_YCbCr422;
+ }
+}
+
+static void get_ovly_info(struct dispdev_config *cfg,
+ struct mcde_video_mode *vmode,
+ struct mcde_overlay_info *info, bool overlay)
+{
+ info->paddr = 0;
+ info->stride = cfg->stride;
+ info->fmt = get_ovly_fmt(cfg->format);
+ info->src_x = 0;
+ info->src_y = 0;
+ info->dst_x = cfg->x;
+ info->dst_y = cfg->y;
+ info->dst_z = cfg->z;
+ info->w = cfg->width;
+ info->h = cfg->height;
+ info->dirty.x = 0;
+ info->dirty.y = 0;
+ info->dirty.w = vmode->xres;
+ info->dirty.h = vmode->yres;
+}
+
+static int dispdev_set_config(struct dispdev *dd, struct dispdev_config *cfg)
+{
+ int ret = 0;
+ if (memcmp(&dd->config, cfg, sizeof(struct dispdev_config)) == 0)
+ return 0;
+
+	/*
+	 * Only update MCDE if format, stride, width and height
+	 * are unchanged. Otherwise just store the new config and update
+	 * MCDE on the next queued buffer, since the currently active
+	 * buffer may still have the old format, width, etc.
+	 */
+ if (cfg->format == dd->config.format &&
+ cfg->stride == dd->config.stride &&
+ cfg->width == dd->config.width &&
+ cfg->height == dd->config.height) {
+
+ int buf_index;
+ if (!dd->buffers_need_update) {
+ buf_index = find_buf(dd, BUF_ACTIVATED);
+ if (buf_index >= 0) {
+ struct mcde_overlay_info info;
+ struct dispdev_buffer *buf;
+ struct mcde_video_mode vmode;
+
+ buf = &dd->buffers[buf_index];
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(cfg, &vmode, &info, dd->overlay);
+ info.paddr = buf->paddr;
+ ret = mcde_dss_apply_overlay(dd->ovly, &info);
+ if (!ret)
+ mcde_dss_update_overlay(dd->ovly,
+ false);
+ }
+ }
+ } else {
+ dd->buffers_need_update = true;
+ }
+
+ dd->config = *cfg;
+
+ return ret;
+}
+
+static int dispdev_register_buffer(struct dispdev *dd, s32 hwmem_name)
+{
+ int ret;
+ struct dispdev_buffer *buf;
+ enum hwmem_mem_type memtype;
+ enum hwmem_access access;
+
+ ret = find_buf(dd, BUF_UNUSED);
+ if (ret < 0)
+ return -ENOMEM;
+ buf = &dd->buffers[ret];
+ buf->alloc = hwmem_resolve_by_name(hwmem_name);
+ if (IS_ERR(buf->alloc)) {
+ ret = PTR_ERR(buf->alloc);
+ goto resolve_failed;
+ }
+
+ hwmem_get_info(buf->alloc, &buf->size, &memtype, &access);
+
+ if (!(access & HWMEM_ACCESS_READ) ||
+ memtype != HWMEM_MEM_CONTIGUOUS_SYS) {
+ ret = -EACCES;
+ goto invalid_mem;
+ }
+
+ buf->state = BUF_FREE;
+ goto out;
+invalid_mem:
+ hwmem_release(buf->alloc);
+resolve_failed:
+out:
+ return ret;
+}
+
+static int dispdev_unregister_buffer(struct dispdev *dd, u32 buf_idx)
+{
+	struct dispdev_buffer *buf;
+
+	if (buf_idx >= ARRAY_SIZE(dd->buffers))
+		return -EINVAL;
+
+	buf = &dd->buffers[buf_idx];
+
+ if (buf->state == BUF_UNUSED)
+ return -EINVAL;
+
+ if (dd->buffers_need_update)
+ dd->buffers_need_update = false;
+
+ if (buf->state == BUF_ACTIVATED) {
+ /* Disable the overlay */
+ struct mcde_overlay_info info;
+ struct mcde_video_mode vmode;
+ /* TODO Wait for frame done */
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, dd->overlay);
+ mcde_dss_apply_overlay(dd->ovly, &info);
+ mcde_dss_update_overlay(dd->ovly, false);
+ hwmem_unpin(dd->buffers[buf_idx].alloc);
+ }
+
+ hwmem_release(buf->alloc);
+ buf->state = BUF_UNUSED;
+ buf->alloc = NULL;
+ buf->size = 0;
+ buf->paddr = 0;
+ dd->first_update = false;
+
+ return 0;
+}
+
+
+/**
+ * @brief Check if the buffer is transparent or black (ARGB = X000)
+ * Note: Only for ARGB32.
+ * Worst case: an almost fully transparent buffer.
+ * Results: ~2200us @800MHz for a WVGA screen, with DENSITY_CHECK=8
+ *          ~520us @800MHz for a WVGA screen, with DENSITY_CHECK=16
+ * (With DENSITY_CHECK=16 an 800x480 surface is sampled at
+ * (480/16) * (800/16) = 1500 pixels.)
+ *
+ * @param w width
+ * @param h height
+ * @param addr buffer addr
+ *
+ * @return 1 if the buffer is transparent, else 0
+ */
+static int is_transparent(int w, int h, u32 *addr)
+{
+ int i, j;
+ u32 *c, *next_line;
+ u32 sum;
+
+ next_line = addr;
+ sum = 0;
+
+ /* TODO Optimize me */
+ for (j = 0; j < h; j += DENSITY_CHECK) {
+ c = next_line;
+ for (i = 0; i < w; i += DENSITY_CHECK) {
+ sum += ((*c) & 0x00FFFFFF);
+ c += DENSITY_CHECK;
+ }
+ if (sum)
+ return 0; /* Not "transparent" */
+ next_line += (w * DENSITY_CHECK);
+ }
+
+ return 1; /* "Transparent" */
+}
+
+static int dispdev_queue_buffer(struct dispdev *dd,
+ struct dispdev_buffer_info *buffer)
+{
+ int ret, i;
+ struct mcde_overlay_info info;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+ struct hwmem_region rgn = { .offset = 0, .count = 1, .start = 0 };
+ struct hwmem_alloc *alloc;
+ struct mcde_video_mode vmode;
+ u32 buf_idx = buffer->buf_idx;
+
+ if (buf_idx >= ARRAY_SIZE(dd->buffers) ||
+ dd->buffers[buf_idx].state != BUF_DEQUEUED)
+ return -EINVAL;
+
+ alloc = dd->buffers[buf_idx].alloc;
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, dd->overlay);
+ ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length);
+ if (ret) {
+ dev_warn(dd->mdev.this_device, "Pin failed, %d\n", ret);
+ return -EINVAL;
+ }
+
+ rgn.size = rgn.end = dd->buffers[buf_idx].size;
+ ret = hwmem_set_domain(alloc, HWMEM_ACCESS_READ,
+ HWMEM_DOMAIN_SYNC, &rgn);
+ if (ret)
+ dev_warn(dd->mdev.this_device, "Set domain failed, %d\n", ret);
+
+ i = find_buf(dd, BUF_ACTIVATED);
+ if (i >= 0) {
+ dd->buffers[i].state = BUF_FREE;
+ wake_up(&dd->waitq_dq);
+ }
+
+ if (!dd->first_update) {
+ dd->first_update = true;
+ dd->buffers_need_update = false;
+ }
+
+ dd->buffers[buf_idx].paddr = mem_chunk.paddr;
+
+ if (buffer->display_update && !dd->buffers_need_update &&
+ dd->config.width == buffer->buf_cfg.width &&
+ dd->config.height == buffer->buf_cfg.height &&
+ dd->config.format == buffer->buf_cfg.format &&
+ dd->config.stride == buffer->buf_cfg.stride) {
+ info.paddr = mem_chunk.paddr;
+ mcde_dss_apply_overlay(dd->ovly, &info);
+ mcde_dss_update_overlay(dd->ovly, false);
+ } else if (buffer->display_update) {
+ dd->buffers_need_update = true;
+ }
+
+ /* Disable the MCDE FB overlay */
+ if ((dd->parent_ovly->state != NULL) &&
+ (dd->ddev->check_transparency)) {
+ dd->ddev->check_transparency--;
+ mcde_dss_get_overlay_info(dd->parent_ovly, &info);
+ if (dd->ddev->check_transparency == 0) {
+ if (is_transparent(info.w, info.h, info.vaddr)) {
+ mcde_dss_disable_overlay(dd->parent_ovly);
+ printk(KERN_INFO "%s Disable overlay\n",
+ __func__);
+ }
+ }
+ }
+
+ dd->buffers[buf_idx].state = BUF_ACTIVATED;
+
+ return 0;
+}
+
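+/*
+ * Note: dd->lock is dropped around the wait below so that a concurrent
+ * DISPDEV_QUEUE_BUFFER_IOC can activate a new buffer and thereby move
+ * the previous one to BUF_FREE, which is what wakes this waiter.
+ */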
+static int dispdev_dequeue_buffer(struct dispdev *dd)
+{
+ int i;
+
+ i = find_buf(dd, BUF_FREE);
+ if (i < 0) {
+ if (find_buf(dd, BUF_ACTIVATED) < 0)
+ return -EINVAL;
+ mutex_unlock(&dd->lock);
+ wait_event(dd->waitq_dq, (i = find_buf(dd, BUF_FREE)) >= 0);
+ mutex_lock(&dd->lock);
+ }
+ hwmem_unpin(dd->buffers[i].alloc);
+ dd->buffers[i].state = BUF_DEQUEUED;
+ dd->buffers[i].paddr = 0;
+
+ return i;
+}
+
+long dispdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct dispdev *dd = (struct dispdev *)file->private_data;
+
+ mutex_lock(&dd->lock);
+
+ switch (cmd) {
+ case DISPDEV_SET_CONFIG_IOC:
+ {
+ struct dispdev_config cfg;
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(cfg)))
+ ret = -EFAULT;
+ else
+ ret = dispdev_set_config(dd, &cfg);
+ }
+ break;
+ case DISPDEV_GET_CONFIG_IOC:
+ ret = copy_to_user((void __user *)arg, &dd->config,
+ sizeof(dd->config));
+ if (ret)
+ ret = -EFAULT;
+ break;
+ case DISPDEV_REGISTER_BUFFER_IOC:
+ ret = dispdev_register_buffer(dd, (s32)arg);
+ break;
+ case DISPDEV_UNREGISTER_BUFFER_IOC:
+ ret = dispdev_unregister_buffer(dd, (u32)arg);
+ break;
+ case DISPDEV_QUEUE_BUFFER_IOC:
+ {
+ struct dispdev_buffer_info buffer;
+ if (copy_from_user(&buffer, (void __user *)arg,
+ sizeof(buffer)))
+ ret = -EFAULT;
+ else
+ ret = dispdev_queue_buffer(dd, &buffer);
+ break;
+ }
+ case DISPDEV_DEQUEUE_BUFFER_IOC:
+ ret = dispdev_dequeue_buffer(dd);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ mutex_unlock(&dd->lock);
+
+ return ret;
+}
+
+static const struct file_operations dispdev_fops = {
+ .open = dispdev_open,
+ .release = dispdev_release,
+ .unlocked_ioctl = dispdev_ioctl,
+};
+
+static void init_dispdev(struct dispdev *dd, struct mcde_display_device *ddev,
+ const char *name, bool overlay)
+{
+ u16 w, h;
+ int rotation;
+
+ mutex_init(&dd->lock);
+ INIT_LIST_HEAD(&dd->list);
+ dd->ddev = ddev;
+ dd->overlay = overlay;
+ mcde_dss_get_native_resolution(ddev, &w, &h);
+ rotation = mcde_dss_get_rotation(ddev);
+
+ if ((rotation == MCDE_DISPLAY_ROT_90_CCW) ||
+ (rotation == MCDE_DISPLAY_ROT_90_CW)) {
+ dd->config.width = h;
+ dd->config.height = w;
+ } else {
+ dd->config.width = w;
+ dd->config.height = h;
+ }
+ dd->config.format = DISPDEV_FMT_RGB565;
+ dd->config.stride = sizeof(u16) * w;
+ dd->config.x = 0;
+ dd->config.y = 0;
+ dd->config.z = 0;
+ dd->buffers_need_update = false;
+ dd->first_update = false;
+ init_waitqueue_head(&dd->waitq_dq);
+ dd->mdev.minor = MISC_DYNAMIC_MINOR;
+ dd->mdev.name = name;
+ dd->mdev.fops = &dispdev_fops;
+ pr_info("%s: name=%s w=%d, h=%d, fmt=%d, stride=%d\n", __func__, name,
+ dd->config.width, dd->config.height, dd->config.format,
+ dd->config.stride);
+}
+
+int dispdev_create(struct mcde_display_device *ddev, bool overlay,
+ struct mcde_overlay *parent_ovly)
+{
+ int ret = 0;
+ struct dispdev *dd;
+ struct mcde_video_mode vmode;
+ struct mcde_overlay_info info = {0};
+
+ static int counter;
+
+ dd = kzalloc(sizeof(struct dispdev), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+ snprintf(dd->name, sizeof(dd->name), "%s%d",
+ DISPDEV_DEFAULT_DEVICE_PREFIX, counter++);
+ init_dispdev(dd, ddev, dd->name, overlay);
+
+ if (!overlay) {
+ ret = mcde_dss_enable_display(ddev);
+ if (ret)
+ goto fail_enable_display;
+ mcde_dss_get_video_mode(ddev, &vmode);
+ mcde_dss_try_video_mode(ddev, &vmode);
+ ret = mcde_dss_set_video_mode(ddev, &vmode);
+ if (ret)
+ goto fail_set_video_mode;
+ mcde_dss_set_pixel_format(ddev, info.fmt);
+ mcde_dss_apply_channel(ddev);
+ } else
+ mcde_dss_get_video_mode(ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, overlay);
+
+ /* Save the MCDE FB overlay */
+ dd->parent_ovly = parent_ovly;
+
+ dd->ovly = mcde_dss_create_overlay(ddev, &info);
+ if (!dd->ovly) {
+ ret = -ENOMEM;
+ goto fail_create_ovly;
+ }
+
+ ret = misc_register(&dd->mdev);
+ if (ret)
+ goto fail_register_misc;
+ mutex_lock(&dev_list_lock);
+ list_add_tail(&dd->list, &dev_list);
+ mutex_unlock(&dev_list_lock);
+
+ goto out;
+
+fail_register_misc:
+ mcde_dss_destroy_overlay(dd->ovly);
+fail_create_ovly:
+ if (!overlay)
+ mcde_dss_disable_display(ddev);
+fail_set_video_mode:
+fail_enable_display:
+ kfree(dd);
+out:
+ return ret;
+}
+
+void dispdev_destroy(struct mcde_display_device *ddev)
+{
+ struct dispdev *dd;
+ struct dispdev *tmp;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(dd, tmp, &dev_list, list) {
+ if (dd->ddev == ddev) {
+ list_del(&dd->list);
+ misc_deregister(&dd->mdev);
+ mcde_dss_destroy_overlay(dd->ovly);
+ /*
+ * TODO: Uncomment when DSS has reference
+ * counting of enable/disable
+ */
+ /* mcde_dss_disable_display(dd->ddev); */
+ kfree(dd);
+ break;
+ }
+ }
+ mutex_unlock(&dev_list_lock);
+}
+
+static void dispdev_destroy_all(void)
+{
+ struct dispdev *dd;
+ struct dispdev *tmp;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(dd, tmp, &dev_list, list) {
+ list_del(&dd->list);
+ misc_deregister(&dd->mdev);
+ mcde_dss_destroy_overlay(dd->ovly);
+ /*
+ * TODO: Uncomment when DSS has reference
+ * counting of enable/disable
+ */
+ /* mcde_dss_disable_display(dd->ddev); */
+ kfree(dd);
+ }
+ mutex_unlock(&dev_list_lock);
+
+ mutex_destroy(&dev_list_lock);
+}
+
+static int __init dispdev_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ mutex_init(&dev_list_lock);
+
+ return 0;
+}
+module_init(dispdev_init);
+
+static void __exit dispdev_exit(void)
+{
+ dispdev_destroy_all();
+ pr_info("%s\n", __func__);
+}
+module_exit(dispdev_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Display output device driver");
+
diff --git a/drivers/misc/hwmem/Makefile b/drivers/misc/hwmem/Makefile
new file mode 100644
index 00000000000..c307616a181
--- /dev/null
+++ b/drivers/misc/hwmem/Makefile
@@ -0,0 +1,3 @@
+hwmem-objs := hwmem-main.o hwmem-ioctl.o cache_handler.o contig_alloc.o
+
+obj-$(CONFIG_HWMEM) += hwmem.o
diff --git a/drivers/misc/hwmem/cache_handler.c b/drivers/misc/hwmem/cache_handler.c
new file mode 100644
index 00000000000..e0ab4ee6cf8
--- /dev/null
+++ b/drivers/misc/hwmem/cache_handler.c
@@ -0,0 +1,510 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Cache handler
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/hwmem.h>
+
+#include <asm/pgtable.h>
+
+#include <mach/dcache.h>
+
+#include "cache_handler.h"
+
+#define U32_MAX (~(u32)0)
+
+enum hwmem_alloc_flags cachi_get_cache_settings(
+ enum hwmem_alloc_flags requested_cache_settings);
+void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings,
+ pgprot_t *pgprot);
+
+static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access,
+ struct hwmem_region *region);
+static void sync_buf_post_cpu(struct cach_buf *buf,
+ enum hwmem_access next_access, struct hwmem_region *next_region);
+
+static void invalidate_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+static void clean_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+static void flush_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+
+static void null_range(struct cach_range *range);
+static void expand_range(struct cach_range *range,
+ struct cach_range *range_2_add);
+/*
+ * Expands range to one of enclosing_range's two edges. The function will
+ * choose which of enclosing_range's edges to expand range to in such a
+ * way that the size of range is minimized. range must be located inside
+ * enclosing_range.
+ */
+static void expand_range_2_edge(struct cach_range *range,
+ struct cach_range *enclosing_range);
+static void shrink_range(struct cach_range *range,
+ struct cach_range *range_2_remove);
+static bool is_non_empty_range(struct cach_range *range);
+static void intersect_range(struct cach_range *range_1,
+ struct cach_range *range_2, struct cach_range *intersection);
+/* The align_up restrictions apply here too */
+static void align_range_up(struct cach_range *range, u32 alignment);
+static u32 range_length(struct cach_range *range);
+static void region_2_range(struct hwmem_region *region, u32 buffer_size,
+ struct cach_range *range);
+
+static void *offset_2_vaddr(struct cach_buf *buf, u32 offset);
+static u32 offset_2_paddr(struct cach_buf *buf, u32 offset);
+
+/* Saturates, might return unaligned values when that happens */
+static u32 align_up(u32 value, u32 alignment);
+static u32 align_down(u32 value, u32 alignment);
+
+/*
+ * Exported functions
+ */
+
+void cach_init_buf(struct cach_buf *buf, enum hwmem_alloc_flags cache_settings,
+ u32 size)
+{
+ buf->vstart = NULL;
+ buf->pstart = 0;
+ buf->size = size;
+
+ buf->cache_settings = cachi_get_cache_settings(cache_settings);
+}
+
+void cach_set_buf_addrs(struct cach_buf *buf, void *vaddr, u32 paddr)
+{
+ bool tmp;
+
+ buf->vstart = vaddr;
+ buf->pstart = paddr;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+ /*
+ * Keep whatever is in the cache. This way we avoid an
+ * unnecessary synch if CPU is the first user.
+ */
+ buf->range_in_cpu_cache.start = 0;
+ buf->range_in_cpu_cache.end = buf->size;
+ align_range_up(&buf->range_in_cpu_cache,
+ get_dcache_granularity());
+ buf->range_dirty_in_cpu_cache.start = 0;
+ buf->range_dirty_in_cpu_cache.end = buf->size;
+ align_range_up(&buf->range_dirty_in_cpu_cache,
+ get_dcache_granularity());
+ } else {
+ flush_cpu_dcache(buf->vstart, buf->pstart, buf->size, false,
+ &tmp);
+ drain_cpu_write_buf();
+
+ null_range(&buf->range_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ }
+ null_range(&buf->range_invalid_in_cpu_cache);
+}
+
+void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot)
+{
+ cachi_set_pgprot_cache_options(buf->cache_settings, pgprot);
+}
+
+void cach_set_domain(struct cach_buf *buf, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region)
+{
+ struct hwmem_region *__region;
+ struct hwmem_region full_region;
+
+ if (region != NULL) {
+ __region = region;
+ } else {
+ full_region.offset = 0;
+ full_region.count = 1;
+ full_region.start = 0;
+ full_region.end = buf->size;
+ full_region.size = buf->size;
+
+ __region = &full_region;
+ }
+
+ switch (domain) {
+ case HWMEM_DOMAIN_SYNC:
+ sync_buf_post_cpu(buf, access, __region);
+
+ break;
+
+ case HWMEM_DOMAIN_CPU:
+ sync_buf_pre_cpu(buf, access, __region);
+
+ break;
+ }
+}
+
+/*
+ * Local functions
+ */
+
+enum hwmem_alloc_flags __attribute__((weak)) cachi_get_cache_settings(
+ enum hwmem_alloc_flags requested_cache_settings)
+{
+ static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT |
+ HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE |
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY;
+ /* We don't know the cache setting so we assume worst case. */
+ static const u32 CACHE_SETTING = HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_CACHED | HWMEM_ALLOC_HINT_CACHE_WB |
+ HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE;
+
+ if (requested_cache_settings & CACHE_ON_FLAGS_MASK)
+ return CACHE_SETTING;
+ else if (requested_cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE ||
+ (requested_cache_settings & HWMEM_ALLOC_HINT_UNCACHED &&
+ !(requested_cache_settings &
+ HWMEM_ALLOC_HINT_NO_WRITE_COMBINE)))
+ return HWMEM_ALLOC_HINT_WRITE_COMBINE;
+ else if (requested_cache_settings &
+ (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED))
+ return 0;
+ else
+ /* Nothing specified, use cached */
+ return CACHE_SETTING;
+}
+
+void __attribute__((weak)) cachi_set_pgprot_cache_options(
+ enum hwmem_alloc_flags cache_settings, pgprot_t *pgprot)
+{
+ if (cache_settings & HWMEM_ALLOC_HINT_CACHED)
+ *pgprot = *pgprot; /* To silence compiler and checkpatch */
+ else if (cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE)
+ *pgprot = pgprot_writecombine(*pgprot);
+ else
+ *pgprot = pgprot_noncached(*pgprot);
+}
+
+bool __attribute__((weak)) speculative_data_prefetch(void)
+{
+ /* We don't know so we go with the safe alternative */
+ return true;
+}
+
+static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access,
+ struct hwmem_region *region)
+{
+ bool write = access & HWMEM_ACCESS_WRITE;
+ bool read = access & HWMEM_ACCESS_READ;
+
+ if (!write && !read)
+ return;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+ struct cach_range region_range;
+
+ region_2_range(region, buf->size, &region_range);
+
+ if (read || (write && buf->cache_settings &
+ HWMEM_ALLOC_HINT_CACHE_WB))
+			/* Perform deferred invalidates */
+ invalidate_cpu_cache(buf, &region_range);
+ if (read || (write && buf->cache_settings &
+ HWMEM_ALLOC_HINT_CACHE_AOW))
+ expand_range(&buf->range_in_cpu_cache, &region_range);
+ if (write && buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_WB) {
+ struct cach_range dirty_range_addition;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)
+ dirty_range_addition = region_range;
+ else
+ intersect_range(&buf->range_in_cpu_cache,
+ &region_range, &dirty_range_addition);
+
+ expand_range(&buf->range_dirty_in_cpu_cache,
+ &dirty_range_addition);
+ }
+ }
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) {
+ if (write)
+ buf->in_cpu_write_buf = true;
+ }
+}
+
+static void sync_buf_post_cpu(struct cach_buf *buf,
+ enum hwmem_access next_access, struct hwmem_region *next_region)
+{
+ bool write = next_access & HWMEM_ACCESS_WRITE;
+ bool read = next_access & HWMEM_ACCESS_READ;
+ struct cach_range region_range;
+
+ if (!write && !read)
+ return;
+
+ region_2_range(next_region, buf->size, &region_range);
+
+ if (write) {
+ if (speculative_data_prefetch()) {
+ /* Defer invalidate */
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_in_cpu_cache,
+ &region_range, &intersection);
+
+ expand_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+
+ clean_cpu_cache(buf, &region_range);
+ } else {
+ flush_cpu_cache(buf, &region_range);
+ }
+ }
+ if (read)
+ clean_cpu_cache(buf, &region_range);
+
+ if (buf->in_cpu_write_buf) {
+ drain_cpu_write_buf();
+
+ buf->in_cpu_write_buf = false;
+ }
+}
+
+static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_invalid_in_cpu_cache, range,
+ &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool flushed_everything;
+
+ expand_range_2_edge(&intersection,
+ &buf->range_invalid_in_cpu_cache);
+
+ /*
+ * Cache handler never uses invalidate to discard data in the
+ * cache so we can use flush instead which is considerably
+ * faster for large buffers.
+ */
+ flush_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &flushed_everything);
+
+ if (flushed_everything) {
+ null_range(&buf->range_invalid_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ } else {
+ /*
+ * No need to shrink range_in_cpu_cache as invalidate
+ * is only used when we can't keep track of what's in
+ * the CPU cache.
+ */
+ shrink_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+ }
+ }
+}
+
+static void clean_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_dirty_in_cpu_cache, range, &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool cleaned_everything;
+
+ expand_range_2_edge(&intersection,
+ &buf->range_dirty_in_cpu_cache);
+
+ clean_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &cleaned_everything);
+
+ if (cleaned_everything)
+ null_range(&buf->range_dirty_in_cpu_cache);
+ else
+ shrink_range(&buf->range_dirty_in_cpu_cache,
+ &intersection);
+ }
+}
+
+static void flush_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_in_cpu_cache, range, &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool flushed_everything;
+
+ expand_range_2_edge(&intersection, &buf->range_in_cpu_cache);
+
+ flush_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &flushed_everything);
+
+ if (flushed_everything) {
+ if (!speculative_data_prefetch())
+ null_range(&buf->range_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ null_range(&buf->range_invalid_in_cpu_cache);
+ } else {
+ if (!speculative_data_prefetch())
+ shrink_range(&buf->range_in_cpu_cache,
+ &intersection);
+ shrink_range(&buf->range_dirty_in_cpu_cache,
+ &intersection);
+ shrink_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+ }
+ }
+}
+
+static void null_range(struct cach_range *range)
+{
+ range->start = U32_MAX;
+ range->end = 0;
+}
+
+static void expand_range(struct cach_range *range,
+ struct cach_range *range_2_add)
+{
+ range->start = min(range->start, range_2_add->start);
+ range->end = max(range->end, range_2_add->end);
+}
+
+/*
+ * Expands range to one of enclosing_range's two edges. The function will
+ * choose which of enclosing_range's edges to expand range to in such a
+ * way that the size of range is minimized. range must be located inside
+ * enclosing_range.
+ */
+static void expand_range_2_edge(struct cach_range *range,
+ struct cach_range *enclosing_range)
+{
+ u32 space_on_low_side = range->start - enclosing_range->start;
+ u32 space_on_high_side = enclosing_range->end - range->end;
+
+ if (space_on_low_side < space_on_high_side)
+ range->start = enclosing_range->start;
+ else
+ range->end = enclosing_range->end;
+}
+
+static void shrink_range(struct cach_range *range,
+ struct cach_range *range_2_remove)
+{
+ if (range_2_remove->start > range->start)
+ range->end = min(range->end, range_2_remove->start);
+ else
+ range->start = max(range->start, range_2_remove->end);
+
+ if (range->start >= range->end)
+ null_range(range);
+}
+
+static bool is_non_empty_range(struct cach_range *range)
+{
+ return range->end > range->start;
+}
+
+static void intersect_range(struct cach_range *range_1,
+ struct cach_range *range_2, struct cach_range *intersection)
+{
+ intersection->start = max(range_1->start, range_2->start);
+ intersection->end = min(range_1->end, range_2->end);
+
+ if (intersection->start >= intersection->end)
+ null_range(intersection);
+}
+
+/* Align_up restrictions apply here too */
+static void align_range_up(struct cach_range *range, u32 alignment)
+{
+ if (!is_non_empty_range(range))
+ return;
+
+ range->start = align_down(range->start, alignment);
+ range->end = align_up(range->end, alignment);
+}
+
+static u32 range_length(struct cach_range *range)
+{
+ if (is_non_empty_range(range))
+ return range->end - range->start;
+ else
+ return 0;
+}
+
+static void region_2_range(struct hwmem_region *region, u32 buffer_size,
+ struct cach_range *range)
+{
+ /*
+	 * We don't care about invalid regions; instead we limit the region's
+	 * range to the buffer's range. This should work well enough; worst
+	 * case we sync the entire buffer when we get an invalid region, which
+	 * is acceptable.
+ */
+ range->start = region->offset + region->start;
+ range->end = min(region->offset + (region->count * region->size) -
+ (region->size - region->end), buffer_size);
+ if (range->start >= range->end) {
+ null_range(range);
+ return;
+ }
+
+ align_range_up(range, get_dcache_granularity());
+}
+
+static void *offset_2_vaddr(struct cach_buf *buf, u32 offset)
+{
+ return (void *)((u32)buf->vstart + offset);
+}
+
+static u32 offset_2_paddr(struct cach_buf *buf, u32 offset)
+{
+ return buf->pstart + offset;
+}
+
+/* Saturates, might return unaligned values when that happens */
+static u32 align_up(u32 value, u32 alignment)
+{
+ u32 remainder = value % alignment;
+ u32 value_2_add;
+
+ if (remainder == 0)
+ return value;
+
+ value_2_add = alignment - remainder;
+
+ if (value_2_add > U32_MAX - value) /* Will overflow */
+ return U32_MAX;
+
+ return value + value_2_add;
+}
+
+static u32 align_down(u32 value, u32 alignment)
+{
+ u32 remainder = value % alignment;
+ if (remainder == 0)
+ return value;
+
+ return value - remainder;
+}
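+
+/*
+ * For example, with a 32 byte granularity align_up(33, 32) returns 64 and
+ * align_down(33, 32) returns 32. Near the top of the address range the
+ * saturation kicks in: align_up(0xfffffffd, 32) would overflow, so U32_MAX
+ * is returned instead, which is an unaligned value as noted above.
+ */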
diff --git a/drivers/misc/hwmem/cache_handler.h b/drivers/misc/hwmem/cache_handler.h
new file mode 100644
index 00000000000..792105196fa
--- /dev/null
+++ b/drivers/misc/hwmem/cache_handler.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Cache handler
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/*
+ * Cache handler can not handle simultaneous execution! The caller has to
+ * ensure such a situation does not occur.
+ */
+
+#ifndef _CACHE_HANDLER_H_
+#define _CACHE_HANDLER_H_
+
+#include <linux/types.h>
+#include <linux/hwmem.h>
+
+/*
+ * To avoid having to duplicate all datatypes we've used the hwmem datatypes.
+ * If someone wants to use the cache handler but not hwmem then we'll have to
+ * define our own datatypes.
+ */
+
+struct cach_range {
+ u32 start; /* Inclusive */
+ u32 end; /* Exclusive */
+};
+
+/*
+ * Internal, do not touch!
+ */
+struct cach_buf {
+ void *vstart;
+ u32 pstart;
+ u32 size;
+
+ /* Remaining hints are active */
+ enum hwmem_alloc_flags cache_settings;
+
+ bool in_cpu_write_buf;
+ struct cach_range range_in_cpu_cache;
+ struct cach_range range_dirty_in_cpu_cache;
+ struct cach_range range_invalid_in_cpu_cache;
+};
+
+void cach_init_buf(struct cach_buf *buf,
+ enum hwmem_alloc_flags cache_settings, u32 size);
+
+void cach_set_buf_addrs(struct cach_buf *buf, void *vaddr, u32 paddr);
+
+void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot);
+
+void cach_set_domain(struct cach_buf *buf, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region);
+
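+/*
+ * Minimal usage sketch (illustrative only, not part of the API): init the
+ * buffer, hand it the kernel/physical addresses once the backing memory is
+ * mapped, then request a domain transition around each CPU access. This
+ * mirrors how hwmem-main.c drives the cache handler.
+ *
+ *	struct cach_buf buf;
+ *
+ *	cach_init_buf(&buf, HWMEM_ALLOC_HINT_CACHED, size);
+ *	cach_set_buf_addrs(&buf, vaddr, paddr);
+ *
+ *	cach_set_domain(&buf, HWMEM_ACCESS_WRITE, HWMEM_DOMAIN_CPU, NULL);
+ *	memset(vaddr, 0, size);
+ *	cach_set_domain(&buf, HWMEM_ACCESS_WRITE, HWMEM_DOMAIN_SYNC, NULL);
+ */
+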
+#endif /* _CACHE_HANDLER_H_ */
diff --git a/drivers/misc/hwmem/contig_alloc.c b/drivers/misc/hwmem/contig_alloc.c
new file mode 100644
index 00000000000..31533ed5988
--- /dev/null
+++ b/drivers/misc/hwmem/contig_alloc.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Contiguous memory allocator
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>,
+ * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <asm/sizes.h>
+
+#define MAX_INSTANCE_NAME_LENGTH 31
+
+struct alloc {
+ struct list_head list;
+
+ bool in_use;
+ phys_addr_t paddr;
+ size_t size;
+};
+
+struct instance {
+ struct list_head list;
+
+ char name[MAX_INSTANCE_NAME_LENGTH + 1];
+
+ phys_addr_t region_paddr;
+ void *region_kaddr;
+ size_t region_size;
+
+ struct list_head alloc_list;
+
+#ifdef CONFIG_DEBUG_FS
+ struct inode *debugfs_inode;
+ int cona_status_free;
+ int cona_status_used;
+ int cona_status_max_cont;
+ int cona_status_max_check;
+ int cona_status_biggest_free;
+ int cona_status_printed;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+};
+
+static LIST_HEAD(instance_list);
+
+static DEFINE_MUTEX(lock);
+
+void *cona_create(const char *name, phys_addr_t region_paddr,
+ size_t region_size);
+void *cona_alloc(void *instance, size_t size);
+void cona_free(void *instance, void *alloc);
+phys_addr_t cona_get_alloc_paddr(void *alloc);
+void *cona_get_alloc_kaddr(void *instance, void *alloc);
+size_t cona_get_alloc_size(void *alloc);
+
+static int init_alloc_list(struct instance *instance);
+static void clean_alloc_list(struct instance *instance);
+static struct alloc *find_free_alloc_bestfit(struct instance *instance,
+ size_t size);
+static struct alloc *split_allocation(struct alloc *alloc,
+ size_t new_alloc_size);
+static phys_addr_t get_alloc_offset(struct instance *instance,
+ struct alloc *alloc);
+
+void *cona_create(const char *name, phys_addr_t region_paddr,
+ size_t region_size)
+{
+ int ret;
+ struct instance *instance;
+ struct vm_struct *vm_area;
+
+ if (region_size == 0)
+ return ERR_PTR(-EINVAL);
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (instance == NULL)
+ return ERR_PTR(-ENOMEM);
+
+	strncpy(instance->name, name, MAX_INSTANCE_NAME_LENGTH + 1);
+ /* Truncate name if necessary */
+ instance->name[MAX_INSTANCE_NAME_LENGTH] = '\0';
+ instance->region_paddr = region_paddr;
+ instance->region_size = region_size;
+
+ vm_area = get_vm_area(region_size, VM_IOREMAP);
+ if (vm_area == NULL) {
+ printk(KERN_WARNING "CONA: Failed to allocate %u bytes"
+ " kernel virtual memory", region_size);
+ ret = -ENOMSG;
+ goto vmem_alloc_failed;
+ }
+ instance->region_kaddr = vm_area->addr;
+
+ INIT_LIST_HEAD(&instance->alloc_list);
+ ret = init_alloc_list(instance);
+ if (ret < 0)
+ goto init_alloc_list_failed;
+
+ mutex_lock(&lock);
+ list_add_tail(&instance->list, &instance_list);
+ mutex_unlock(&lock);
+
+ return instance;
+
+init_alloc_list_failed:
+ vm_area = remove_vm_area(instance->region_kaddr);
+ if (vm_area == NULL)
+ printk(KERN_ERR "CONA: Failed to free kernel virtual memory,"
+ " resource leak!\n");
+
+ kfree(vm_area);
+vmem_alloc_failed:
+ kfree(instance);
+
+ return ERR_PTR(ret);
+}
+
+void *cona_alloc(void *instance, size_t size)
+{
+ struct instance *instance_l = (struct instance *)instance;
+ struct alloc *alloc;
+
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&lock);
+
+ alloc = find_free_alloc_bestfit(instance_l, size);
+ if (IS_ERR(alloc))
+ goto out;
+ if (size < alloc->size) {
+ alloc = split_allocation(alloc, size);
+ if (IS_ERR(alloc))
+ goto out;
+ } else {
+ alloc->in_use = true;
+ }
+#ifdef CONFIG_DEBUG_FS
+ instance_l->cona_status_max_cont += alloc->size;
+ instance_l->cona_status_max_check =
+ max(instance_l->cona_status_max_check,
+ instance_l->cona_status_max_cont);
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+
+void cona_free(void *instance, void *alloc)
+{
+ struct instance *instance_l = (struct instance *)instance;
+ struct alloc *alloc_l = (struct alloc *)alloc;
+ struct alloc *other;
+
+ mutex_lock(&lock);
+
+ alloc_l->in_use = false;
+
+#ifdef CONFIG_DEBUG_FS
+ instance_l->cona_status_max_cont -= alloc_l->size;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+ other = list_entry(alloc_l->list.prev, struct alloc, list);
+ if ((alloc_l->list.prev != &instance_l->alloc_list) &&
+ !other->in_use) {
+ other->size += alloc_l->size;
+ list_del(&alloc_l->list);
+ kfree(alloc_l);
+ alloc_l = other;
+ }
+ other = list_entry(alloc_l->list.next, struct alloc, list);
+ if ((alloc_l->list.next != &instance_l->alloc_list) &&
+ !other->in_use) {
+ alloc_l->size += other->size;
+ list_del(&other->list);
+ kfree(other);
+ }
+
+ mutex_unlock(&lock);
+}
+
+phys_addr_t cona_get_alloc_paddr(void *alloc)
+{
+ return ((struct alloc *)alloc)->paddr;
+}
+
+void *cona_get_alloc_kaddr(void *instance, void *alloc)
+{
+ struct instance *instance_l = (struct instance *)instance;
+
+ return instance_l->region_kaddr + get_alloc_offset(instance_l,
+ (struct alloc *)alloc);
+}
+
+size_t cona_get_alloc_size(void *alloc)
+{
+ return ((struct alloc *)alloc)->size;
+}
+
+static int init_alloc_list(struct instance *instance)
+{
+ /*
+	 * Hack to avoid handing out allocs that cross a 64MiB boundary, as
+	 * B2R2 can't handle that.
+ */
+ int ret;
+ u32 curr_pos = instance->region_paddr;
+ u32 region_end = instance->region_paddr + instance->region_size;
+ u32 next_64mib_boundary = (curr_pos + SZ_64M) & ~(SZ_64M - 1);
+ struct alloc *alloc;
+
+ if (PAGE_SIZE >= SZ_64M) {
+ printk(KERN_WARNING "CONA: PAGE_SIZE >= 64MiB\n");
+ return -ENOMSG;
+ }
+
+ while (next_64mib_boundary < region_end) {
+ if (next_64mib_boundary - curr_pos > PAGE_SIZE) {
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = next_64mib_boundary - curr_pos -
+ PAGE_SIZE;
+ alloc->in_use = false;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+ curr_pos = alloc->paddr + alloc->size;
+ }
+
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = PAGE_SIZE;
+ alloc->in_use = true;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+ curr_pos = alloc->paddr + alloc->size;
+
+#ifdef CONFIG_DEBUG_FS
+ instance->cona_status_max_cont += alloc->size;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+ next_64mib_boundary += SZ_64M;
+ }
+
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = region_end - curr_pos;
+ alloc->in_use = false;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+
+ return 0;
+
+error:
+ clean_alloc_list(instance);
+
+ return ret;
+}
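+
+/*
+ * Example of the layout produced above, assuming a 4kB PAGE_SIZE and a
+ * region covering 0x03000000 - 0x05000000 (one 64MiB boundary, at
+ * 0x04000000): the list is seeded with a free alloc at
+ * 0x03000000 - 0x03fff000, a permanently in_use one-page guard at
+ * 0x03fff000 - 0x04000000 and a free alloc at 0x04000000 - 0x05000000, so
+ * no free alloc can ever straddle the boundary.
+ */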
+
+static void clean_alloc_list(struct instance *instance)
+{
+ while (list_empty(&instance->alloc_list) == 0) {
+ struct alloc *i = list_first_entry(&instance->alloc_list,
+ struct alloc, list);
+
+ list_del(&i->list);
+
+ kfree(i);
+ }
+}
+
+static struct alloc *find_free_alloc_bestfit(struct instance *instance,
+ size_t size)
+{
+ size_t best_diff = ~(size_t)0;
+ struct alloc *alloc = NULL, *i;
+
+ list_for_each_entry(i, &instance->alloc_list, list) {
+		size_t diff;
+
+		if (i->in_use || i->size < size)
+			continue;
+
+		diff = i->size - size;
+ if (diff < best_diff) {
+ alloc = i;
+ best_diff = diff;
+ }
+ }
+
+ return alloc != NULL ? alloc : ERR_PTR(-ENOMEM);
+}
+
+static struct alloc *split_allocation(struct alloc *alloc,
+ size_t new_alloc_size)
+{
+ struct alloc *new_alloc;
+
+ new_alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (new_alloc == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ new_alloc->in_use = true;
+ new_alloc->paddr = alloc->paddr;
+ new_alloc->size = new_alloc_size;
+ alloc->size -= new_alloc_size;
+ alloc->paddr += new_alloc_size;
+
+ list_add_tail(&new_alloc->list, &alloc->list);
+
+ return new_alloc;
+}
+
+static phys_addr_t get_alloc_offset(struct instance *instance,
+ struct alloc *alloc)
+{
+ return alloc->paddr - instance->region_paddr;
+}
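+
+/*
+ * Minimal usage sketch (illustrative only; the instance name, region address
+ * and sizes are made up):
+ *
+ *	void *instance = cona_create("camera", 0x40000000, SZ_16M);
+ *
+ *	if (!IS_ERR(instance)) {
+ *		void *alloc = cona_alloc(instance, SZ_1M);
+ *
+ *		if (!IS_ERR(alloc)) {
+ *			phys_addr_t paddr = cona_get_alloc_paddr(alloc);
+ *			void *kaddr = cona_get_alloc_kaddr(instance, alloc);
+ *
+ *			...
+ *			cona_free(instance, alloc);
+ *		}
+ *	}
+ */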
+
+/* Debug */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int print_alloc(struct instance *instance, struct alloc *alloc,
+ char **buf, size_t buf_size);
+static int print_alloc_status(struct instance *instance, char **buf,
+ size_t buf_size);
+static struct instance *get_instance_from_file(struct file *file);
+static int debugfs_allocs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_allocs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_allocs_read,
+};
+
+static int print_alloc(struct instance *instance, struct alloc *alloc,
+ char **buf, size_t buf_size)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ if (i == 1) {
+ if (alloc->in_use)
+ instance->cona_status_used += alloc->size;
+ else
+ instance->cona_status_free += alloc->size;
+ }
+
+ if (!alloc->in_use) {
+ instance->cona_status_biggest_free =
+ max((size_t)alloc->size,
+ (size_t)instance->cona_status_biggest_free);
+ }
+
+ ret = snprintf(*buf, buf_size_l, "paddr: %10x\tsize: %10u\t"
+ "in use: %1u\t used: %10u (%dMB)"
+ " \t free: %10u (%dMB)\n",
+ alloc->paddr,
+ alloc->size,
+ alloc->in_use,
+ instance->cona_status_used,
+ instance->cona_status_used/1024/1024,
+ instance->cona_status_free,
+ instance->cona_status_free/1024/1024);
+
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static int print_alloc_status(struct instance *instance, char **buf,
+ size_t buf_size)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ ret = snprintf(*buf, buf_size_l, "Overall peak usage:\t%10u "
+ "(%dMB)\nCurrent max usage:\t%10u (%dMB)\n"
+ "Current biggest free:\t%10d (%dMB)\n",
+ instance->cona_status_max_check,
+ instance->cona_status_max_check/1024/1024,
+ instance->cona_status_max_cont,
+ instance->cona_status_max_cont/1024/1024,
+ instance->cona_status_biggest_free,
+ instance->cona_status_biggest_free/1024/1024);
+
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static struct instance *get_instance_from_file(struct file *file)
+{
+ struct instance *curr_instance;
+
+ list_for_each_entry(curr_instance, &instance_list, list) {
+ if (file->f_dentry->d_inode == curr_instance->debugfs_inode)
+ return curr_instance;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static int debugfs_allocs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ /*
+	 * We assume the supplied buffer and PAGE_SIZE are large enough to
+	 * hold information about at least one alloc; if not, no data will be
+ * returned.
+ */
+
+ int ret;
+ struct instance *instance;
+ struct alloc *curr_alloc;
+ char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *local_buf_pos = local_buf;
+ size_t available_space = min((size_t)PAGE_SIZE, count);
+	/* private_data is initialized to NULL in open, which I assume is 0. */
+ void **curr_pos = &file->private_data;
+ size_t bytes_read;
+ bool readout_aborted = false;
+
+ if (local_buf == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&lock);
+ instance = get_instance_from_file(file);
+ if (IS_ERR(instance)) {
+ ret = PTR_ERR(instance);
+ goto out;
+ }
+
+ list_for_each_entry(curr_alloc, &instance->alloc_list, list) {
+ phys_addr_t alloc_offset = get_alloc_offset(instance,
+ curr_alloc);
+ if (alloc_offset < (phys_addr_t)*curr_pos)
+ continue;
+
+ ret = print_alloc(instance, curr_alloc, &local_buf_pos,
+ available_space - (size_t)(local_buf_pos -
+ local_buf));
+
+ if (ret == -EINVAL) { /* No more room */
+ readout_aborted = true;
+ break;
+ } else if (ret < 0) {
+ goto out;
+ }
+ /*
+ * There could be an overflow issue here in the unlikely case
+ * where the region is placed at the end of the address range
+ * and the last alloc is 1 byte large. Since this is debug code
+		 * and that case will most likely never happen, I've chosen to
+		 * defer fixing it until it happens.
+ */
+ *curr_pos = (void *)(alloc_offset + 1);
+
+ /* Make sure to also print status if there were any prints */
+ instance->cona_status_printed = false;
+ }
+
+ if (!readout_aborted && !instance->cona_status_printed) {
+ ret = print_alloc_status(instance, &local_buf_pos,
+ available_space -
+ (size_t)(local_buf_pos - local_buf));
+
+ if (ret == -EINVAL) /* No more room */
+ readout_aborted = true;
+ else if (ret < 0)
+ goto out;
+ else
+ instance->cona_status_printed = true;
+ }
+
+ if (!readout_aborted) {
+ instance->cona_status_free = 0;
+ instance->cona_status_used = 0;
+ instance->cona_status_biggest_free = 0;
+ }
+
+ bytes_read = (size_t)(local_buf_pos - local_buf);
+
+	if (copy_to_user(buf, local_buf, bytes_read)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+ ret = bytes_read;
+
+out:
+ kfree(local_buf);
+ mutex_unlock(&lock);
+
+ return ret;
+}
+
+static int __init init_debugfs(void)
+{
+ struct instance *curr_instance;
+ struct dentry *debugfs_root_dir = debugfs_create_dir("cona", NULL);
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(curr_instance, &instance_list, list) {
+ struct dentry *file_dentry;
+ char tmp_str[MAX_INSTANCE_NAME_LENGTH + 7 + 1];
+ tmp_str[0] = '\0';
+ strcat(tmp_str, curr_instance->name);
+ strcat(tmp_str, "_allocs");
+ file_dentry = debugfs_create_file(tmp_str, 0444,
+			debugfs_root_dir, NULL, &debugfs_allocs_fops);
+ if (file_dentry != NULL)
+ curr_instance->debugfs_inode = file_dentry->d_inode;
+ }
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+/*
+ * Must be executed after all instances have been created, hence the
+ * late_initcall.
+ */
+late_initcall(init_debugfs);
+
+#endif /* #ifdef CONFIG_DEBUG_FS */
diff --git a/drivers/misc/hwmem/hwmem-ioctl.c b/drivers/misc/hwmem/hwmem-ioctl.c
new file mode 100644
index 00000000000..e9e50de78bd
--- /dev/null
+++ b/drivers/misc/hwmem/hwmem-ioctl.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/mm_types.h>
+#include <linux/hwmem.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+
+static int hwmem_open(struct inode *inode, struct file *file);
+static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma);
+static int hwmem_release_fop(struct inode *inode, struct file *file);
+static long hwmem_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+static unsigned long hwmem_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+static const struct file_operations hwmem_fops = {
+ .open = hwmem_open,
+ .mmap = hwmem_ioctl_mmap,
+ .unlocked_ioctl = hwmem_ioctl,
+ .release = hwmem_release_fop,
+ .get_unmapped_area = hwmem_get_unmapped_area,
+};
+
+static struct miscdevice hwmem_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hwmem",
+ .fops = &hwmem_fops,
+};
+
+struct hwmem_file {
+ struct mutex lock;
+ struct idr idr; /* id -> struct hwmem_alloc*, ref counted */
+ struct hwmem_alloc *fd_alloc; /* Ref counted */
+};
+
+static s32 create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc)
+{
+ int id, ret;
+
+ while (true) {
+ if (idr_pre_get(&hwfile->idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
+
+ ret = idr_get_new_above(&hwfile->idr, alloc, 1, &id);
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ return -ENOMEM;
+ }
+
+ /*
+	 * IDR always returns the lowest free id, so there is no wrapping
+	 * issue.
+ */
+ if (id >= (s32)1 << (31 - PAGE_SHIFT)) {
+ dev_err(hwmem_device.this_device, "Out of IDs!\n");
+ idr_remove(&hwfile->idr, id);
+ return -ENOMSG;
+ }
+
+ return (s32)id << PAGE_SHIFT;
+}
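+
+/*
+ * The ids handed out by create_id() are the idr index shifted up PAGE_SHIFT
+ * bits, i.e. they are page aligned. This lets userspace pass an id directly
+ * as an mmap offset; hwmem_ioctl_mmap() below recovers the id from
+ * vma->vm_pgoff by shifting it back up.
+ */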
+
+static void remove_id(struct hwmem_file *hwfile, s32 id)
+{
+ idr_remove(&hwfile->idr, id >> PAGE_SHIFT);
+}
+
+static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = id ? idr_find(&hwfile->idr, id >> PAGE_SHIFT) :
+ hwfile->fd_alloc;
+ if (alloc == NULL)
+ alloc = ERR_PTR(-EINVAL);
+
+ return alloc;
+}
+
+static s32 alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
+{
+ s32 ret = 0;
+ struct hwmem_alloc *alloc;
+
+ alloc = hwmem_alloc(req->size, req->flags, req->default_access,
+ req->mem_type);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ ret = create_id(hwfile, alloc);
+ if (ret < 0)
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int alloc_fd(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ if (hwfile->fd_alloc)
+ return -EINVAL;
+
+ alloc = hwmem_alloc(req->size, req->flags, req->default_access,
+ req->mem_type);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwfile->fd_alloc = alloc;
+
+ return 0;
+}
+
+static int release(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ if (id == 0)
+ return -EINVAL;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ remove_id(hwfile, id);
+ hwmem_release(alloc);
+
+ return 0;
+}
+
+static int set_cpu_domain(struct hwmem_file *hwfile,
+ struct hwmem_set_domain_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_CPU,
+ (struct hwmem_region *)&req->region);
+}
+
+static int set_sync_domain(struct hwmem_file *hwfile,
+ struct hwmem_set_domain_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_SYNC,
+ (struct hwmem_region *)&req->region);
+}
+
+static int pin(struct hwmem_file *hwfile, struct hwmem_pin_request *req)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_mem_type mem_type;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_get_info(alloc, NULL, &mem_type, NULL);
+ if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS)
+ return -EINVAL;
+
+ ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length);
+ if (ret < 0)
+ return ret;
+
+ req->phys_addr = mem_chunk.paddr;
+
+ return 0;
+}
+
+static int unpin(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_unpin(alloc);
+
+ return 0;
+}
+
+static int set_access(struct hwmem_file *hwfile,
+ struct hwmem_set_access_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_access(alloc, req->access, req->pid);
+}
+
+static int get_info(struct hwmem_file *hwfile,
+ struct hwmem_get_info_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_get_info(alloc, &req->size, &req->mem_type, &req->access);
+
+ return 0;
+}
+
+static s32 export(struct hwmem_file *hwfile, s32 id)
+{
+ s32 ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /*
+	 * The user could be about to send the buffer to a driver, but there
+	 * is a chance the current thread group doesn't have import rights if
+	 * it gained access to the buffer via an inter-process fd transfer
+	 * (fork, Android binder); if this is the case the driver will not be
+	 * able to resolve the buffer name. To avoid this situation we give
+	 * the current thread group import rights. This does not breach
+	 * security as the process already has access to the buffer (otherwise
+	 * it would not be able to get here).
+ */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+
+ ret = hwmem_set_access(alloc, (access | HWMEM_ACCESS_IMPORT),
+ task_tgid_nr(current));
+ if (ret < 0)
+ return ret;
+
+ return hwmem_get_name(alloc);
+}
+
+static s32 import(struct hwmem_file *hwfile, s32 name)
+{
+ s32 ret = 0;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ alloc = hwmem_resolve_by_name(name);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /* Check access permissions for process */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+ if (!(access & HWMEM_ACCESS_IMPORT)) {
+ ret = -EPERM;
+ goto error;
+ }
+
+ ret = create_id(hwfile, alloc);
+ if (ret < 0)
+ goto error;
+
+ return ret;
+
+error:
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int import_fd(struct hwmem_file *hwfile, s32 name)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ if (hwfile->fd_alloc)
+ return -EINVAL;
+
+ alloc = hwmem_resolve_by_name(name);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /* Check access permissions for process */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+ if (!(access & HWMEM_ACCESS_IMPORT)) {
+ ret = -EPERM;
+ goto error;
+ }
+
+ hwfile->fd_alloc = alloc;
+
+ return 0;
+
+error:
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int hwmem_open(struct inode *inode, struct file *file)
+{
+ struct hwmem_file *hwfile;
+
+ hwfile = kzalloc(sizeof(struct hwmem_file), GFP_KERNEL);
+ if (hwfile == NULL)
+ return -ENOMEM;
+
+ idr_init(&hwfile->idr);
+ mutex_init(&hwfile->lock);
+ file->private_data = hwfile;
+
+ return 0;
+}
+
+static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int ret;
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+ struct hwmem_alloc *alloc;
+
+ mutex_lock(&hwfile->lock);
+
+ alloc = resolve_id(hwfile, (s32)vma->vm_pgoff << PAGE_SHIFT);
+ if (IS_ERR(alloc)) {
+ ret = PTR_ERR(alloc);
+ goto out;
+ }
+
+ ret = hwmem_mmap(alloc, vma);
+
+out:
+ mutex_unlock(&hwfile->lock);
+
+ return ret;
+}
+
+static int hwmem_release_idr_for_each_wrapper(int id, void *ptr, void *data)
+{
+ hwmem_release((struct hwmem_alloc *)ptr);
+
+ return 0;
+}
+
+static int hwmem_release_fop(struct inode *inode, struct file *file)
+{
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+
+ idr_for_each(&hwfile->idr, hwmem_release_idr_for_each_wrapper, NULL);
+ idr_remove_all(&hwfile->idr);
+ idr_destroy(&hwfile->idr);
+
+ if (hwfile->fd_alloc)
+ hwmem_release(hwfile->fd_alloc);
+
+ mutex_destroy(&hwfile->lock);
+
+ kfree(hwfile);
+
+ return 0;
+}
+
+static long hwmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOSYS;
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+
+ mutex_lock(&hwfile->lock);
+
+ switch (cmd) {
+ case HWMEM_ALLOC_IOC:
+ {
+ struct hwmem_alloc_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_alloc_request)))
+ ret = -EFAULT;
+ else
+ ret = alloc(hwfile, &req);
+ }
+ break;
+ case HWMEM_ALLOC_FD_IOC:
+ {
+ struct hwmem_alloc_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_alloc_request)))
+ ret = -EFAULT;
+ else
+ ret = alloc_fd(hwfile, &req);
+ }
+ break;
+ case HWMEM_RELEASE_IOC:
+ ret = release(hwfile, (s32)arg);
+ break;
+ case HWMEM_SET_CPU_DOMAIN_IOC:
+ {
+ struct hwmem_set_domain_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_domain_request)))
+ ret = -EFAULT;
+ else
+ ret = set_cpu_domain(hwfile, &req);
+ }
+ break;
+ case HWMEM_SET_SYNC_DOMAIN_IOC:
+ {
+ struct hwmem_set_domain_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_domain_request)))
+ ret = -EFAULT;
+ else
+ ret = set_sync_domain(hwfile, &req);
+ }
+ break;
+ case HWMEM_PIN_IOC:
+ {
+ struct hwmem_pin_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_pin_request)))
+ ret = -EFAULT;
+ else
+ ret = pin(hwfile, &req);
+ if (ret == 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_pin_request)))
+ ret = -EFAULT;
+ }
+ break;
+ case HWMEM_UNPIN_IOC:
+ ret = unpin(hwfile, (s32)arg);
+ break;
+ case HWMEM_SET_ACCESS_IOC:
+ {
+ struct hwmem_set_access_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_access_request)))
+ ret = -EFAULT;
+ else
+ ret = set_access(hwfile, &req);
+ }
+ break;
+ case HWMEM_GET_INFO_IOC:
+ {
+ struct hwmem_get_info_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_get_info_request)))
+ ret = -EFAULT;
+ else
+ ret = get_info(hwfile, &req);
+ if (ret == 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_get_info_request)))
+ ret = -EFAULT;
+ }
+ break;
+ case HWMEM_EXPORT_IOC:
+ ret = export(hwfile, (s32)arg);
+ break;
+ case HWMEM_IMPORT_IOC:
+ ret = import(hwfile, (s32)arg);
+ break;
+ case HWMEM_IMPORT_FD_IOC:
+ ret = import_fd(hwfile, (s32)arg);
+ break;
+ }
+
+ mutex_unlock(&hwfile->lock);
+
+ return ret;
+}
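+
+/*
+ * Userspace usage sketch (illustrative only; error handling is omitted and
+ * the request values are made up):
+ *
+ *	struct hwmem_alloc_request req = {
+ *		.size = 4096,
+ *		.flags = HWMEM_ALLOC_HINT_CACHED,
+ *		.default_access = HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE,
+ *		.mem_type = HWMEM_MEM_CONTIGUOUS_SYS,
+ *	};
+ *	int fd = open("/dev/hwmem", O_RDWR);
+ *	int id = ioctl(fd, HWMEM_ALLOC_IOC, &req);
+ *
+ * Since create_id() only hands out page aligned ids, the id can then be
+ * used directly as the mmap offset:
+ *
+ *	void *mem = mmap(NULL, req.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		fd, (off_t)id);
+ */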
+
+static unsigned long hwmem_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ /*
+ * pgoff will not be valid as it contains a buffer id (right shifted
+ * PAGE_SHIFT bits). To not confuse get_unmapped_area we'll not pass
+ * on file or pgoff.
+ */
+ return current->mm->get_unmapped_area(NULL, addr, len, 0, flags);
+}
+
+int __init hwmem_ioctl_init(void)
+{
+ if (PAGE_SHIFT < 1 || PAGE_SHIFT > 30 || sizeof(size_t) != 4 ||
+ sizeof(int) > 4 || sizeof(enum hwmem_alloc_flags) != 4 ||
+ sizeof(enum hwmem_access) != 4 ||
+ sizeof(enum hwmem_mem_type) != 4) {
+ dev_err(hwmem_device.this_device, "PAGE_SHIFT < 1 || PAGE_SHIFT"
+ " > 30 || sizeof(size_t) != 4 || sizeof(int) > 4 ||"
+ " sizeof(enum hwmem_alloc_flags) != 4 || sizeof(enum"
+ " hwmem_access) != 4 || sizeof(enum hwmem_mem_type)"
+ " != 4\n");
+ return -ENOMSG;
+ }
+ if (PAGE_SHIFT > 15)
+ dev_warn(hwmem_device.this_device, "Due to the page size only"
+ " %u id:s per file instance are available\n",
+ ((u32)1 << (31 - PAGE_SHIFT)) - 1);
+
+ return misc_register(&hwmem_device);
+}
+
+void __exit hwmem_ioctl_exit(void)
+{
+ misc_deregister(&hwmem_device);
+}
diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c
new file mode 100644
index 00000000000..b91d99bc2be
--- /dev/null
+++ b/drivers/misc/hwmem/hwmem-main.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>,
+ * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pid.h>
+#include <linux/list.h>
+#include <linux/hwmem.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/kallsyms.h>
+#include <linux/vmalloc.h>
+#include "cache_handler.h"
+
+#define S32_MAX 2147483647
+
+struct hwmem_alloc_threadg_info {
+ struct list_head list;
+
+ struct pid *threadg_pid; /* Ref counted */
+
+ enum hwmem_access access;
+};
+
+struct hwmem_alloc {
+ struct list_head list;
+
+ atomic_t ref_cnt;
+
+ enum hwmem_alloc_flags flags;
+ struct hwmem_mem_type_struct *mem_type;
+
+ void *allocator_hndl;
+ phys_addr_t paddr;
+ void *kaddr;
+ size_t size;
+ s32 name;
+
+ /* Access control */
+ enum hwmem_access default_access;
+ struct list_head threadg_info_list;
+
+ /* Cache handling */
+ struct cach_buf cach_buf;
+
+#ifdef CONFIG_DEBUG_FS
+ /* Debug */
+ void *creator;
+ pid_t creator_tgid;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+};
+
+static struct platform_device *hwdev;
+
+static LIST_HEAD(alloc_list);
+static DEFINE_IDR(global_idr);
+static DEFINE_MUTEX(lock);
+
+static void vm_open(struct vm_area_struct *vma);
+static void vm_close(struct vm_area_struct *vma);
+static struct vm_operations_struct vm_ops = {
+ .open = vm_open,
+ .close = vm_close,
+};
+
+static void kunmap_alloc(struct hwmem_alloc *alloc);
+
+/* Helpers */
+
+static void destroy_alloc_threadg_info(
+ struct hwmem_alloc_threadg_info *info)
+{
+ if (info->threadg_pid)
+ put_pid(info->threadg_pid);
+
+ kfree(info);
+}
+
+static void clean_alloc_threadg_info_list(struct hwmem_alloc *alloc)
+{
+ struct hwmem_alloc_threadg_info *info;
+ struct hwmem_alloc_threadg_info *tmp;
+
+ list_for_each_entry_safe(info, tmp, &(alloc->threadg_info_list),
+ list) {
+ list_del(&info->list);
+ destroy_alloc_threadg_info(info);
+ }
+}
+
+static enum hwmem_access get_access(struct hwmem_alloc *alloc)
+{
+ struct hwmem_alloc_threadg_info *info;
+ struct pid *my_pid;
+ bool found = false;
+
+ my_pid = find_get_pid(task_tgid_nr(current));
+ if (!my_pid)
+ return 0;
+
+ list_for_each_entry(info, &(alloc->threadg_info_list), list) {
+ if (info->threadg_pid == my_pid) {
+ found = true;
+ break;
+ }
+ }
+
+ put_pid(my_pid);
+
+ if (found)
+ return info->access;
+ else
+ return alloc->default_access;
+}
+
+static void clear_alloc_mem(struct hwmem_alloc *alloc)
+{
+ cach_set_domain(&alloc->cach_buf, HWMEM_ACCESS_WRITE,
+ HWMEM_DOMAIN_CPU, NULL);
+
+ memset(alloc->kaddr, 0, alloc->size);
+}
+
+static void destroy_alloc(struct hwmem_alloc *alloc)
+{
+ list_del(&alloc->list);
+
+ if (alloc->name != 0) {
+ idr_remove(&global_idr, alloc->name);
+ alloc->name = 0;
+ }
+
+ clean_alloc_threadg_info_list(alloc);
+
+ kunmap_alloc(alloc);
+
+ if (!IS_ERR_OR_NULL(alloc->allocator_hndl))
+ alloc->mem_type->allocator_api.free(
+ alloc->mem_type->allocator_instance,
+ alloc->allocator_hndl);
+
+ kfree(alloc);
+}
+
+static int kmap_alloc(struct hwmem_alloc *alloc)
+{
+ int ret;
+ pgprot_t pgprot;
+ void *alloc_kaddr;
+
+ alloc_kaddr = alloc->mem_type->allocator_api.get_alloc_kaddr(
+ alloc->mem_type->allocator_instance, alloc->allocator_hndl);
+ if (IS_ERR(alloc_kaddr))
+ return PTR_ERR(alloc_kaddr);
+
+ pgprot = PAGE_KERNEL;
+ cach_set_pgprot_cache_options(&alloc->cach_buf, &pgprot);
+
+ ret = ioremap_page_range((unsigned long)alloc_kaddr,
+ (unsigned long)alloc_kaddr + alloc->size, alloc->paddr, pgprot);
+ if (ret < 0) {
+ dev_warn(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr,
+ alloc->paddr + alloc->size);
+ return ret;
+ }
+
+ alloc->kaddr = alloc_kaddr;
+
+ return 0;
+}
+
+static void kunmap_alloc(struct hwmem_alloc *alloc)
+{
+ if (alloc->kaddr == NULL)
+ return;
+
+ unmap_kernel_range((unsigned long)alloc->kaddr, alloc->size);
+
+ alloc->kaddr = NULL;
+}
+
+static struct hwmem_mem_type_struct *resolve_mem_type(
+ enum hwmem_mem_type mem_type)
+{
+ unsigned int i;
+ for (i = 0; i < hwmem_num_mem_types; i++) {
+ if (hwmem_mem_types[i].id == mem_type)
+ return &hwmem_mem_types[i];
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+/* HWMEM API */
+
+struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags,
+ enum hwmem_access def_access, enum hwmem_mem_type mem_type)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+
+ if (hwdev == NULL) {
+ printk(KERN_ERR "HWMEM: Badly configured\n");
+ return ERR_PTR(-ENOMSG);
+ }
+
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&lock);
+
+ size = PAGE_ALIGN(size);
+
+ alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto alloc_alloc_failed;
+ }
+
+ INIT_LIST_HEAD(&alloc->list);
+ atomic_inc(&alloc->ref_cnt);
+ alloc->flags = flags;
+ alloc->default_access = def_access;
+ INIT_LIST_HEAD(&alloc->threadg_info_list);
+#ifdef CONFIG_DEBUG_FS
+ alloc->creator = __builtin_return_address(0);
+ alloc->creator_tgid = task_tgid_nr(current);
+#endif
+ alloc->mem_type = resolve_mem_type(mem_type);
+ if (IS_ERR(alloc->mem_type)) {
+ ret = PTR_ERR(alloc->mem_type);
+ goto resolve_mem_type_failed;
+ }
+
+ alloc->allocator_hndl = alloc->mem_type->allocator_api.alloc(
+ alloc->mem_type->allocator_instance, size);
+ if (IS_ERR(alloc->allocator_hndl)) {
+ ret = PTR_ERR(alloc->allocator_hndl);
+ goto allocator_failed;
+ }
+
+ alloc->paddr = alloc->mem_type->allocator_api.get_alloc_paddr(
+ alloc->allocator_hndl);
+ alloc->size = alloc->mem_type->allocator_api.get_alloc_size(
+ alloc->allocator_hndl);
+
+ cach_init_buf(&alloc->cach_buf, alloc->flags, alloc->size);
+ ret = kmap_alloc(alloc);
+ if (ret < 0)
+ goto kmap_alloc_failed;
+ cach_set_buf_addrs(&alloc->cach_buf, alloc->kaddr, alloc->paddr);
+
+ list_add_tail(&alloc->list, &alloc_list);
+
+ clear_alloc_mem(alloc);
+
+ goto out;
+
+kmap_alloc_failed:
+allocator_failed:
+resolve_mem_type_failed:
+ destroy_alloc(alloc);
+alloc_alloc_failed:
+ alloc = ERR_PTR(ret);
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+EXPORT_SYMBOL(hwmem_alloc);
+
+void hwmem_release(struct hwmem_alloc *alloc)
+{
+ mutex_lock(&lock);
+
+ if (atomic_dec_and_test(&alloc->ref_cnt))
+ destroy_alloc(alloc);
+
+ mutex_unlock(&lock);
+}
+EXPORT_SYMBOL(hwmem_release);
+
+int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region)
+{
+ mutex_lock(&lock);
+
+ cach_set_domain(&alloc->cach_buf, access, domain, region);
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hwmem_set_domain);
+
+int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks,
+ u32 *mem_chunks_length)
+{
+ if (*mem_chunks_length < 1) {
+ *mem_chunks_length = 1;
+ return -ENOSPC;
+ }
+
+ mutex_lock(&lock);
+
+ mem_chunks[0].paddr = alloc->paddr;
+ mem_chunks[0].size = alloc->size;
+ *mem_chunks_length = 1;
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hwmem_pin);
+
+void hwmem_unpin(struct hwmem_alloc *alloc)
+{
+}
+EXPORT_SYMBOL(hwmem_unpin);
+
+static void vm_open(struct vm_area_struct *vma)
+{
+ atomic_inc(&((struct hwmem_alloc *)vma->vm_private_data)->ref_cnt);
+}
+
+static void vm_close(struct vm_area_struct *vma)
+{
+ hwmem_release((struct hwmem_alloc *)vma->vm_private_data);
+}
+
+int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma)
+{
+ int ret = 0;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
+ enum hwmem_access access;
+ mutex_lock(&lock);
+
+ access = get_access(alloc);
+
+ /* Check permissions */
+ if ((!(access & HWMEM_ACCESS_WRITE) &&
+ (vma->vm_flags & VM_WRITE)) ||
+ (!(access & HWMEM_ACCESS_READ) &&
+ (vma->vm_flags & VM_READ))) {
+ ret = -EPERM;
+ goto illegal_access;
+ }
+
+ if (vma_size > alloc->size) {
+ ret = -EINVAL;
+ goto illegal_size;
+ }
+
+ /*
+ * We don't want Linux to do anything (merging etc) with our VMAs as
+ * the offset is not necessarily valid
+ */
+ vma->vm_flags |= VM_SPECIAL;
+ cach_set_pgprot_cache_options(&alloc->cach_buf, &vma->vm_page_prot);
+ vma->vm_private_data = (void *)alloc;
+ atomic_inc(&alloc->ref_cnt);
+ vma->vm_ops = &vm_ops;
+
+ ret = remap_pfn_range(vma, vma->vm_start, alloc->paddr >> PAGE_SHIFT,
+ min(vma_size, (unsigned long)alloc->size), vma->vm_page_prot);
+ if (ret < 0)
+ goto map_failed;
+
+ goto out;
+
+map_failed:
+ atomic_dec(&alloc->ref_cnt);
+illegal_size:
+illegal_access:
+
+out:
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_mmap);
+
+void *hwmem_kmap(struct hwmem_alloc *alloc)
+{
+ void *ret;
+
+ mutex_lock(&lock);
+
+ ret = alloc->kaddr;
+
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_kmap);
+
+void hwmem_kunmap(struct hwmem_alloc *alloc)
+{
+}
+EXPORT_SYMBOL(hwmem_kunmap);
+
+int hwmem_set_access(struct hwmem_alloc *alloc,
+ enum hwmem_access access, pid_t pid_nr)
+{
+ int ret;
+ struct hwmem_alloc_threadg_info *info;
+ struct pid *pid;
+ bool found = false;
+
+ pid = find_get_pid(pid_nr);
+ if (!pid) {
+ ret = -EINVAL;
+ goto error_get_pid;
+ }
+
+ list_for_each_entry(info, &(alloc->threadg_info_list), list) {
+ if (info->threadg_pid == pid) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto error_alloc_info;
+ }
+
+ info->threadg_pid = pid;
+ info->access = access;
+
+ list_add_tail(&(info->list), &(alloc->threadg_info_list));
+ } else {
+ info->access = access;
+ }
+
+ return 0;
+
+error_alloc_info:
+ put_pid(pid);
+error_get_pid:
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_set_access);
+
+void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size,
+ enum hwmem_mem_type *mem_type, enum hwmem_access *access)
+{
+ mutex_lock(&lock);
+
+ if (size != NULL)
+ *size = alloc->size;
+ if (mem_type != NULL)
+ *mem_type = alloc->mem_type->id;
+ if (access != NULL)
+ *access = get_access(alloc);
+
+ mutex_unlock(&lock);
+}
+EXPORT_SYMBOL(hwmem_get_info);
+
+s32 hwmem_get_name(struct hwmem_alloc *alloc)
+{
+ int ret = 0, name;
+
+ mutex_lock(&lock);
+
+ if (alloc->name != 0) {
+ ret = alloc->name;
+ goto out;
+ }
+
+ while (true) {
+ if (idr_pre_get(&global_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ goto pre_get_id_failed;
+ }
+
+ ret = idr_get_new_above(&global_idr, alloc, 1, &name);
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ goto get_id_failed;
+ }
+
+ if (name > S32_MAX) {
+ ret = -ENOMSG;
+ goto overflow;
+ }
+
+ alloc->name = name;
+
+ ret = name;
+ goto out;
+
+overflow:
+ idr_remove(&global_idr, name);
+get_id_failed:
+pre_get_id_failed:
+
+out:
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_get_name);
+
+struct hwmem_alloc *hwmem_resolve_by_name(s32 name)
+{
+ struct hwmem_alloc *alloc;
+
+ mutex_lock(&lock);
+
+ alloc = idr_find(&global_idr, name);
+ if (alloc == NULL) {
+ alloc = ERR_PTR(-EINVAL);
+ goto find_failed;
+ }
+ atomic_inc(&alloc->ref_cnt);
+
+ goto out;
+
+find_failed:
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+EXPORT_SYMBOL(hwmem_resolve_by_name);
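+
+/*
+ * Sharing sketch (illustrative only): a buffer can be shared between two
+ * kernel users by exporting its global name on one side and resolving it on
+ * the other (userspace does the same via the export/import ioctls).
+ *
+ *	On the exporting side:
+ *		s32 name = hwmem_get_name(alloc);
+ *		(hand the name over to the other user)
+ *
+ *	On the importing side:
+ *		struct hwmem_alloc *alloc = hwmem_resolve_by_name(name);
+ *
+ *		if (!IS_ERR(alloc)) {
+ *			...
+ *			hwmem_release(alloc);
+ *		}
+ */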
+
+/* Debug */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debugfs_allocs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_allocs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_allocs_read,
+};
+
+static int print_alloc(struct hwmem_alloc *alloc, char **buf, size_t buf_size)
+{
+ int ret;
+ char creator[KSYM_SYMBOL_LEN];
+ int i;
+
+ if (sprint_symbol(creator, (unsigned long)alloc->creator) < 0)
+ creator[0] = '\0';
+
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ ret = snprintf(*buf, buf_size_l,
+ "%#x\n"
+ "\tSize: %u\n"
+ "\tMemory type: %u\n"
+ "\tName: %#x\n"
+ "\tReference count: %i\n"
+ "\tAllocation flags: %#x\n"
+ "\t$ settings: %#x\n"
+ "\tDefault access: %#x\n"
+ "\tPhysical address: %#x\n"
+ "\tKernel virtual address: %#x\n"
+ "\tCreator: %s\n"
+ "\tCreator thread group id: %u\n",
+ (unsigned int)alloc, alloc->size, alloc->mem_type->id,
+ alloc->name, atomic_read(&alloc->ref_cnt),
+ alloc->flags, alloc->cach_buf.cache_settings,
+ alloc->default_access, alloc->paddr,
+ (unsigned int)alloc->kaddr, creator,
+ alloc->creator_tgid);
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static int debugfs_allocs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ /*
+	 * We assume the supplied buffer and PAGE_SIZE are large enough to
+	 * hold information about at least one alloc; if not, no data will be
+ * returned.
+ */
+
+ int ret;
+ size_t i = 0;
+ struct hwmem_alloc *curr_alloc;
+ char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *local_buf_pos = local_buf;
+ size_t available_space = min((size_t)PAGE_SIZE, count);
+	/* private_data is initialized to NULL in open, which I assume is 0. */
+ void **curr_pos = &file->private_data;
+ size_t bytes_read;
+
+ if (local_buf == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(curr_alloc, &alloc_list, list) {
+ if (i++ < (size_t)*curr_pos)
+ continue;
+
+ ret = print_alloc(curr_alloc, &local_buf_pos, available_space -
+ (size_t)(local_buf_pos - local_buf));
+ if (ret == -EINVAL) /* No more room */
+ break;
+ else if (ret < 0)
+ goto out;
+
+ *curr_pos = (void *)i;
+ }
+
+ bytes_read = (size_t)(local_buf_pos - local_buf);
+
+	if (copy_to_user(buf, local_buf, bytes_read)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+ ret = bytes_read;
+
+out:
+ kfree(local_buf);
+
+ mutex_unlock(&lock);
+
+ return ret;
+}
+
+static void init_debugfs(void)
+{
+	/* Hwmem is never unloaded so dropping the dentries is ok. */
+ struct dentry *debugfs_root_dir = debugfs_create_dir("hwmem", NULL);
+	(void)debugfs_create_file("allocs", 0444, debugfs_root_dir, NULL,
+ &debugfs_allocs_fops);
+}
+
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+/* Module */
+
+extern int hwmem_ioctl_init(void);
+
+static int __devinit hwmem_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (hwdev) {
+ dev_err(&pdev->dev, "Probed multiple times\n");
+ return -EINVAL;
+ }
+
+ hwdev = pdev;
+
+ /*
+ * No need to flush the caches here. If we can keep track of the cache
+ * content then none of our memory will be in the caches, if we can't
+ * keep track of the cache content we always assume all our memory is
+ * in the caches.
+ */
+
+ ret = hwmem_ioctl_init();
+ if (ret < 0)
+ dev_warn(&pdev->dev, "Failed to start hwmem-ioctl, continuing"
+ " anyway\n");
+
+#ifdef CONFIG_DEBUG_FS
+ init_debugfs();
+#endif
+
+ dev_info(&pdev->dev, "Probed OK\n");
+
+ return 0;
+}
+
+static struct platform_driver hwmem_driver = {
+ .probe = hwmem_probe,
+ .driver = {
+ .name = "hwmem",
+ },
+};
+
+static int __init hwmem_init(void)
+{
+ return platform_driver_register(&hwmem_driver);
+}
+subsys_initcall(hwmem_init);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hardware memory driver");
+
diff --git a/drivers/misc/stm.c b/drivers/misc/stm.c
new file mode 100644
index 00000000000..33bb26c27ca
--- /dev/null
+++ b/drivers/misc/stm.c
@@ -0,0 +1,850 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * Philippe Langlais <philippe.Langlais@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <trace/stm.h>
+
+/* STM Registers */
+#define STM_CR (stm.virtbase)
+#define STM_MMC (stm.virtbase + 0x008)
+#define STM_TER (stm.virtbase + 0x010)
+#define STMPERIPHID0 (stm.virtbase + 0xFC0)
+#define STMPERIPHID1 (stm.virtbase + 0xFC8)
+#define STMPERIPHID2 (stm.virtbase + 0xFD0)
+#define STMPERIPHID3 (stm.virtbase + 0xFD8)
+#define STMPCELLID0 (stm.virtbase + 0xFE0)
+#define STMPCELLID1 (stm.virtbase + 0xFE8)
+#define STMPCELLID2 (stm.virtbase + 0xFF0)
+#define STMPCELLID3 (stm.virtbase + 0xFF8)
+
+#define STM_CLOCK_SHIFT 6
+#define STM_CLOCK_MASK 0x1C0
+
+/* Hardware mode for all sources */
+#define STM_MMC_DEFAULT CONFIG_STM_DEFAULT_MASTERS_MODES
+
+/* Max number of channels (multiple of 256) */
+#define STM_NUMBER_OF_CHANNEL CONFIG_STM_NUMBER_OF_CHANNEL
+
+/* Number of dynamically allocated channels used by stm_trace_buffer */
+#define NB_KERNEL_DYNAMIC_CHANNEL 128
+
+static struct stm_device {
+ const struct stm_platform_data *pdata;
+ void __iomem *virtbase;
+ /* Used to register the allocated channels */
+ DECLARE_BITMAP(ch_bitmap, STM_NUMBER_OF_CHANNEL);
+} stm;
+
+volatile struct stm_channel __iomem *stm_channels;
+
+static struct cdev cdev;
+static struct class *stm_class;
+static int stm_major;
+
+static DEFINE_SPINLOCK(lock);
+
+/* Middle value for clock divisor */
+static enum clock_div stm_clockdiv = STM_CLOCK_DIV8;
+
+/* Default value for STM output connection */
+static enum stm_connection_type stm_connection = STM_DEFAULT_CONNECTION;
+
+#define STM_BUFSIZE 256
+struct channel_data {
+ DECLARE_BITMAP(bitmap, STM_NUMBER_OF_CHANNEL);
+ int numero;
+ spinlock_t lock;
+ u8 data_buffer[STM_BUFSIZE];
+};
+
+static u64 stm_printk_buf[1024/sizeof(u64)];
+static arch_spinlock_t stm_buf_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
+static char *mipi60 = "none";
+module_param(mipi60, charp, S_IRUGO);
+MODULE_PARM_DESC(mipi60, "STM Trace to output on probe2 of mipi60 "
+ "('none' or 'ape' or 'modem')");
+
+static char *mipi34 = "none";
+module_param(mipi34, charp, S_IRUGO);
+MODULE_PARM_DESC(mipi34, "STM Trace to output on mipi34 "
+ "('none' or 'ape' or 'modem')");
+
+static char *microsd = "none";
+module_param(microsd, charp, S_IRUGO);
+MODULE_PARM_DESC(microsd, "STM Trace to output on SD card connector "
+ "('none' or 'ape' or 'modem')");
+
+static unsigned int stm_ter;
+module_param(stm_ter, uint, 0);
+MODULE_PARM_DESC(stm_ter, "Value for STM_TER (trace control register). "
+ "Should be set by user as environment variable stm.stm_ter");
+
+#define IS_APE_ON_MIPI34 (mipi34 && !strcmp(mipi34, "ape"))
+#define IS_APE_ON_MIPI60 (mipi60 && !strcmp(mipi60, "ape"))
+#define IS_APE_ON_MICROSD (microsd && !strcmp(microsd, "ape"))
+#define IS_MODEM_ON_MICROSD (microsd && !strcmp(microsd, "modem"))
+
+static int stm_connection_set(void *data, u64 val);
+
+int stm_alloc_channel(int offset)
+{
+ int channel;
+
+ /* Look for a free channel from offset */
+ do {
+ channel = find_next_zero_bit(stm.ch_bitmap,
+ STM_NUMBER_OF_CHANNEL, offset);
+ } while ((channel < STM_NUMBER_OF_CHANNEL)
+ && test_and_set_bit(channel, stm.ch_bitmap));
+ return channel;
+}
+EXPORT_SYMBOL(stm_alloc_channel);
+
+void stm_free_channel(int channel)
+{
+ clear_bit(channel, stm.ch_bitmap);
+}
+EXPORT_SYMBOL(stm_free_channel);
+
+static int stm_get_channel(struct channel_data *ch_data, int __user *arg)
+{
+ int channel, err;
+
+ channel = stm_alloc_channel(0);
+ if (channel < STM_NUMBER_OF_CHANNEL) {
+		/* One free channel found! */
+ err = put_user(channel, arg);
+ if (err)
+ stm_free_channel(channel);
+ else
+ /* Register it in the context of the file */
+ set_bit(channel, ch_data->bitmap);
+ } else
+ err = -ENOMEM;
+ return err;
+}
+
+static int stm_release_channel(struct channel_data *ch_data, int channel)
+{
+ if ((channel < 0) || (channel >= STM_NUMBER_OF_CHANNEL))
+ return -EINVAL;
+ stm_free_channel(channel);
+ clear_bit(channel, ch_data->bitmap);
+ return 0;
+}
+
+/*
+ * Trace a buffer on a given channel
+ * with auto time stamping on last byte(s) only
+ */
+int stm_trace_buffer_onchannel(int channel,
+ const void *data, size_t length)
+{
+ int i, mod64;
+ volatile struct stm_channel __iomem *pch;
+
+ if (channel >= STM_NUMBER_OF_CHANNEL || !stm_channels)
+ return 0;
+
+ pch = &stm_channels[channel];
+
+ /* Align data pointer to u64 & time stamp last byte(s) */
+	mod64 = (unsigned long)data & 7;
+ i = length - 8 + mod64;
+ switch (mod64) {
+ case 0:
+ if (i)
+ pch->no_stamp64 = *(u64 *)data;
+ else {
+ pch->stamp64 = *(u64 *)data;
+ return length;
+ }
+ data += 8;
+ break;
+ case 1:
+ pch->no_stamp8 = *(u8 *)data;
+ pch->no_stamp16 = *(u16 *)(data+1);
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+3);
+ else {
+ pch->stamp32 = *(u32 *)(data+3);
+ return length;
+ }
+ data += 7;
+ break;
+ case 2:
+ pch->no_stamp16 = *(u16 *)data;
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+2);
+ else {
+ pch->stamp32 = *(u32 *)(data+2);
+ return length;
+ }
+ data += 6;
+ break;
+ case 3:
+ pch->no_stamp8 = *(u8 *)data;
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+1);
+ else {
+ pch->stamp32 = *(u32 *)(data+1);
+ return length;
+ }
+ data += 5;
+ break;
+ case 4:
+ if (i)
+ pch->no_stamp32 = *(u32 *)data;
+ else {
+ pch->stamp32 = *(u32 *)data;
+ return length;
+ }
+ data += 4;
+ break;
+ case 5:
+ pch->no_stamp8 = *(u8 *)data;
+ if (i)
+ pch->no_stamp16 = *(u16 *)(data+1);
+ else {
+ pch->stamp16 = *(u16 *)(data+1);
+ return length;
+ }
+ data += 3;
+ break;
+ case 6:
+ if (i)
+ pch->no_stamp16 = *(u16 *)data;
+ else {
+ pch->stamp16 = *(u16 *)data;
+ return length;
+ }
+ data += 2;
+ break;
+ case 7:
+ if (i)
+ pch->no_stamp8 = *(u8 *)data;
+ else {
+ pch->stamp8 = *(u8 *)data;
+ return length;
+ }
+ data++;
+ break;
+ }
+ for (;;) {
+ if (i > 8) {
+ pch->no_stamp64 = *(u64 *)data;
+ data += 8;
+ i -= 8;
+ } else if (i == 8) {
+ pch->stamp64 = *(u64 *)data;
+ break;
+ } else if (i > 4) {
+ pch->no_stamp32 = *(u32 *)data;
+ data += 4;
+ i -= 4;
+ } else if (i == 4) {
+ pch->stamp32 = *(u32 *)data;
+ break;
+ } else if (i > 2) {
+ pch->no_stamp16 = *(u16 *)data;
+ data += 2;
+ i -= 2;
+ } else if (i == 2) {
+ pch->stamp16 = *(u16 *)data;
+ break;
+ } else {
+ pch->stamp8 = *(u8 *)data;
+ break;
+ }
+ }
+ return length;
+}
+EXPORT_SYMBOL(stm_trace_buffer_onchannel);
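
For in-kernel users, the exported channel API above composes as follows. A minimal sketch, assuming the caller follows the driver's convention that stm_alloc_channel() returns STM_NUMBER_OF_CHANNEL when no channel is free (the payload is illustrative):

    static void stm_trace_one_event(void)
    {
            static const char payload[] = "example event";
            int channel = stm_alloc_channel(0);

            if (channel >= STM_NUMBER_OF_CHANNEL)
                    return; /* no free channel */

            /* Emit the buffer, time stamping the last byte(s) */
            stm_trace_buffer_onchannel(channel, payload, sizeof(payload));
            stm_free_channel(channel);
    }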
+
+static int stm_open(struct inode *inode, struct file *file)
+{
+ struct channel_data *channel_data;
+ int retval = 0;
+
+ channel_data = kzalloc(sizeof(struct channel_data), GFP_KERNEL);
+ if (channel_data == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&channel_data->lock);
+ channel_data->numero = -1; /* Channel not yet allocated */
+ file->private_data = channel_data;
+
+ /*
+ * Check if microsd is selected as trace interface
+ * and enable corresponding pins muxing.
+ */
+ if (IS_MODEM_ON_MICROSD)
+ retval = stm_connection_set(NULL, STM_STE_MODEM_ON_MICROSD);
+ else if (IS_APE_ON_MICROSD)
+ retval = stm_connection_set(NULL, STM_STE_APE_ON_MICROSD);
+
+ if (retval)
+ pr_alert("stm_open: failed to connect STM output\n");
+
+ return retval;
+}
+
+static int stm_release(struct inode *inode, struct file *file)
+{
+ struct channel_data *channel;
+
+ channel = (struct channel_data *)file->private_data;
+
+ /* Free allocated channel if necessary */
+ if (channel->numero != -1)
+ stm_free_channel(channel->numero);
+
+ bitmap_andnot(stm.ch_bitmap, stm.ch_bitmap,
+ channel->bitmap, STM_NUMBER_OF_CHANNEL);
+
+ kfree(channel);
+ return 0;
+}
+
+static ssize_t stm_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct channel_data *channel = file->private_data;
+
+ /* Alloc channel at first write */
+ if (channel->numero == -1) {
+ channel->numero = stm_alloc_channel(0);
+		if (channel->numero >= STM_NUMBER_OF_CHANNEL)
+ return -ENOMEM;
+ }
+
+ if (size > STM_BUFSIZE)
+ size = STM_BUFSIZE;
+
+ spin_lock(&channel->lock);
+
+ if (copy_from_user
+ (channel->data_buffer, (void __user *) buf, size)) {
+ spin_unlock(&channel->lock);
+ return -EFAULT;
+ }
+ size = stm_trace_buffer_onchannel(channel->numero,
+ channel->data_buffer, size);
+
+ spin_unlock(&channel->lock);
+
+ return size;
+}
+
+static int stm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /*
+ * Don't allow a mapping that covers more than the STM channels
+ */
+ if ((vma->vm_end - vma->vm_start) >
+ STM_NUMBER_OF_CHANNEL*sizeof(struct stm_channel))
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ stm.pdata->channels_phys_base>>PAGE_SHIFT,
+ STM_NUMBER_OF_CHANNEL*sizeof(struct stm_channel),
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
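
A hypothetical user-space counterpart of the mmap interface above. The device node name and mapping size are assumptions, and the real per-channel register layout is defined by the kernel header trace/stm.h, not reproduced here:

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Assumed; must not exceed STM_NUMBER_OF_CHANNEL * sizeof(struct stm_channel) */
    #define STM_MAP_SIZE 4096

    static int stm_user_trace(uint32_t word)
    {
            volatile uint32_t *chan;
            int fd = open("/dev/stm", O_RDWR); /* node name assumed from STM_DEV_NAME */

            if (fd < 0)
                    return -1;
            chan = mmap(NULL, STM_MAP_SIZE, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, 0);
            close(fd); /* the mapping stays valid after close */
            if ((void *)chan == MAP_FAILED)
                    return -1;
            chan[0] = word; /* write lands in channel 0's register window */
            return 0;
    }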
+
+/* Enable the trace for given sources (bitfield) */
+static void stm_enable_src(unsigned int v)
+{
+ unsigned int cr_val;
+ spin_lock(&lock);
+ cr_val = readl(STM_CR);
+ cr_val &= ~STM_CLOCK_MASK;
+ writel(cr_val|(stm_clockdiv<<STM_CLOCK_SHIFT), STM_CR);
+ /*
+ * If the kernel argument stm_ter has been set by the boot loader
+ * all calls to stm_enable_src will be ignored
+ */
+ v = stm_ter ? stm_ter : v;
+ writel(v, STM_TER);
+ spin_unlock(&lock);
+}
+
+/* Disable all sources */
+static void stm_disable_src(void)
+{
+ writel(0x0, STM_CR); /* stop clock */
+ writel(0x0, STM_TER); /* Disable cores */
+}
+
+/* Set clock speed */
+static int stm_set_ckdiv(enum clock_div v)
+{
+ unsigned int val;
+
+ spin_lock(&lock);
+ val = readl(STM_CR);
+ val &= ~STM_CLOCK_MASK;
+ writel(val | ((v << STM_CLOCK_SHIFT) & STM_CLOCK_MASK), STM_CR);
+ spin_unlock(&lock);
+ stm_clockdiv = v;
+
+ return 0;
+}
+
+/* Return the control register */
+static inline unsigned int stm_get_cr(void)
+{
+ return readl(STM_CR);
+}
+
+/*
+ * Set trace MODE to lossless/lossy (software/hardware);
+ * each bit represents the mode of the corresponding source.
+ */
+static inline void stm_set_modes(unsigned int modes)
+{
+ writel(modes, STM_MMC);
+}
+
+/*
+ * Get trace MODE lossless/lossy (software/hardware);
+ * each bit represents the mode of the corresponding source.
+ */
+static inline unsigned int stm_get_modes(void)
+{
+ return readl(STM_MMC);
+}
+
+/* Count # of free channels */
+static int stm_nb_free_channels(void)
+{
+ int nb_channels, offset;
+
+ nb_channels = 0;
+ offset = 0;
+ for (;;) {
+ offset = find_next_zero_bit(stm.ch_bitmap,
+ STM_NUMBER_OF_CHANNEL, offset);
+ if (offset == STM_NUMBER_OF_CHANNEL)
+ break;
+ offset++;
+ nb_channels++;
+ }
+ return nb_channels;
+}
+
+static long stm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct channel_data *channel = file->private_data;
+
+ switch (cmd) {
+
+ case STM_CONNECTION:
+ if (stm.pdata->stm_connection)
+ stm.pdata->stm_connection(arg);
+ stm_connection = arg;
+ break;
+
+ case STM_DISABLE:
+ stm_disable_src();
+ break;
+
+ case STM_GET_NB_MAX_CHANNELS:
+ err = put_user(STM_NUMBER_OF_CHANNEL, (unsigned int *)arg);
+ break;
+
+ case STM_GET_NB_FREE_CHANNELS:
+ err = put_user(stm_nb_free_channels(), (unsigned int *)arg);
+ break;
+
+ case STM_GET_CHANNEL_NO:
+ err = put_user(channel->numero, (unsigned int *)arg);
+ break;
+
+ case STM_SET_CLOCK_DIV:
+ err = stm_set_ckdiv((enum clock_div) arg);
+ break;
+
+ case STM_SET_MODE:
+ stm_set_modes(arg);
+ break;
+
+ case STM_GET_MODE:
+ err = put_user(stm_get_modes(), (unsigned int *)arg);
+ break;
+
+ case STM_GET_CTRL_REG:
+ err = put_user(stm_get_cr(), (unsigned int *)arg);
+ break;
+
+ case STM_ENABLE_SRC:
+ stm_enable_src(arg);
+ break;
+
+ case STM_GET_FREE_CHANNEL:
+ err = stm_get_channel(channel, (int *)arg);
+ break;
+
+ case STM_RELEASE_CHANNEL:
+ err = stm_release_channel(channel, arg);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * Trace a buffer on a dynamically allocated channel
+ * with auto time stamping on the last byte(s) only
+ * Dynamic channel number >=
+ * STM_NUMBER_OF_CHANNEL - NB_KERNEL_DYNAMIC_CHANNEL
+ */
+int stm_trace_buffer(const void *data, size_t length)
+{
+ int channel;
+
+ channel = stm_alloc_channel(STM_NUMBER_OF_CHANNEL
+ - NB_KERNEL_DYNAMIC_CHANNEL);
+ if (channel < STM_NUMBER_OF_CHANNEL) {
+ length = stm_trace_buffer_onchannel(channel, data, length);
+ stm_free_channel(channel);
+ return length;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(stm_trace_buffer);
+
+static const struct file_operations stm_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = stm_ioctl,
+ .open = stm_open,
+ .llseek = no_llseek,
+ .write = stm_write,
+ .release = stm_release,
+ .mmap = stm_mmap,
+};
+
+/*
+ * Init and deinit driver
+ */
+
+static int __devinit stm_probe(struct platform_device *pdev)
+{
+ int retval = 0;
+
+ if (!pdev || !pdev->dev.platform_data) {
+ pr_alert("No device/platform_data found on STM driver\n");
+ return -ENODEV;
+ }
+
+ stm.pdata = pdev->dev.platform_data;
+
+ cdev_init(&cdev, &stm_fops);
+ cdev.owner = THIS_MODULE;
+
+ stm_channels =
+ ioremap_nocache(stm.pdata->channels_phys_base,
+ STM_NUMBER_OF_CHANNEL*sizeof(*stm_channels));
+ if (stm_channels == NULL) {
+ dev_err(&pdev->dev, "could not remap STM Msg register\n");
+ return -ENODEV;
+ }
+
+ stm.virtbase = ioremap_nocache(stm.pdata->regs_phys_base, SZ_4K);
+ if (stm.virtbase == NULL) {
+ retval = -EIO;
+ dev_err(&pdev->dev, "could not remap STM Register\n");
+ goto err_channels;
+ }
+
+ retval = cdev_add(&cdev, MKDEV(stm_major, 0), 1);
+ if (retval) {
+ dev_err(&pdev->dev, "chardev registration failed\n");
+		goto err_virtbase;
+ }
+
+ if (IS_ERR(device_create(stm_class, &pdev->dev,
+ MKDEV(stm_major, 0), NULL, STM_DEV_NAME)))
+ dev_err(&pdev->dev, "can't create device\n");
+
+ /* Check chip IDs if necessary */
+ if (stm.pdata->id_mask) {
+ u32 periph_id, cell_id;
+
+ periph_id = (readb(STMPERIPHID3)<<24) +
+ (readb(STMPERIPHID2)<<16) +
+ (readb(STMPERIPHID1)<<8) +
+ readb(STMPERIPHID0);
+ cell_id = (readb(STMPCELLID3)<<24) +
+ (readb(STMPCELLID2)<<16) +
+ (readb(STMPCELLID1)<<8) +
+ readb(STMPCELLID0);
+		/* Only warn if it isn't an ST-Ericsson supported IC */
+ if ((periph_id & stm.pdata->id_mask) != 0x00080dec ||
+ cell_id != 0xb105f00d) {
+ dev_warn(&pdev->dev, "STM-Trace IC not compatible\n");
+ dev_warn(&pdev->dev, "periph_id=%x\n", periph_id);
+ dev_warn(&pdev->dev, "pcell_id=%x\n", cell_id);
+ }
+ }
+
+ /* Reserve channels if necessary */
+ if (stm.pdata->channels_reserved_sz) {
+ int i;
+
+ for (i = 0; i < stm.pdata->channels_reserved_sz; i++) {
+ set_bit(stm.pdata->channels_reserved[i],
+ stm.ch_bitmap);
+ }
+ }
+ /* Reserve kernel trace channels on demand */
+#ifdef CONFIG_STM_PRINTK
+ set_bit(CONFIG_STM_PRINTK_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_FTRACE
+ set_bit(CONFIG_STM_FTRACE_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_CTX_SWITCH
+ set_bit(CONFIG_STM_CTX_SWITCH_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_WAKEUP
+ set_bit(CONFIG_STM_WAKEUP_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_STACK_TRACE
+ set_bit(CONFIG_STM_STACK_TRACE_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_TRACE_PRINTK
+ set_bit(CONFIG_STM_TRACE_PRINTK_CHANNEL, stm.ch_bitmap);
+ set_bit(CONFIG_STM_TRACE_BPRINTK_CHANNEL, stm.ch_bitmap);
+#endif
+
+ /* Check kernel's environment parameters first */
+ if (IS_APE_ON_MIPI34)
+ stm_connection = STM_STE_APE_ON_MIPI34_NONE_ON_MIPI60;
+ else if (IS_APE_ON_MIPI60)
+ stm_connection = STM_STE_MODEM_ON_MIPI34_APE_ON_MIPI60;
+
+ /* Apply parameters to driver */
+ if (stm.pdata->stm_connection) {
+ retval = stm.pdata->stm_connection(stm_connection);
+ if (retval) {
+ dev_err(&pdev->dev, "failed to connect STM output\n");
+			goto err_virtbase;
+ }
+ }
+
+ /* Enable STM Masters given in pdata */
+ if (stm.pdata->masters_enabled)
+ stm_enable_src(stm.pdata->masters_enabled);
+ stm_set_modes(STM_MMC_DEFAULT); /* Set all sources in HW mode */
+
+ dev_info(&pdev->dev, "STM-Trace driver probed successfully\n");
+ stm_printk("STM-Trace driver initialized\n");
+ return 0;
+
+err_virtbase:
+	iounmap(stm.virtbase);
+err_channels:
+	iounmap(stm_channels);
+ return retval;
+}
+
+static int __devexit stm_remove(struct platform_device *pdev)
+{
+ device_destroy(stm_class, MKDEV(stm_major, 0));
+ cdev_del(&cdev);
+
+ if (stm.pdata->stm_connection)
+ (void) stm.pdata->stm_connection(STM_DISCONNECT);
+
+ stm_disable_src();
+ iounmap(stm.virtbase);
+ iounmap(stm_channels);
+
+ return 0;
+}
+
+int stm_printk(const char *fmt, ...)
+{
+ int ret;
+ size_t size;
+ va_list args;
+
+ va_start(args, fmt);
+ arch_spin_lock(&stm_buf_lock);
+ size = vscnprintf((char *)stm_printk_buf,
+ sizeof(stm_printk_buf), fmt, args);
+ ret = stm_trace_buffer(stm_printk_buf, size);
+ arch_spin_unlock(&stm_buf_lock);
+ va_end(args);
+ return ret;
+}
+EXPORT_SYMBOL(stm_printk);
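
A usage sketch for stm_printk(): printk()-style tracing over a dynamically allocated channel. It returns the number of bytes traced, or 0 when no dynamic channel was free (irq and usecs are illustrative parameters):

    static void trace_irq_latency(int irq, unsigned int usecs)
    {
            if (!stm_printk("irq %d handled in %u us\n", irq, usecs))
                    pr_debug("STM trace dropped: no free dynamic channel\n");
    }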
+
+/*
+ * Debugfs interface
+ */
+
+static int stm_connection_show(void *data, u64 *val)
+{
+ *val = stm_connection;
+ return 0;
+}
+
+static int stm_connection_set(void *data, u64 val)
+{
+ int retval = 0;
+
+ if (stm.pdata->stm_connection) {
+ stm_connection = val;
+ retval = stm.pdata->stm_connection(val);
+ }
+ return retval;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_connection_fops, stm_connection_show,
+ stm_connection_set, "%llu\n");
+
+static int stm_clockdiv_show(void *data, u64 *val)
+{
+ *val = stm_clockdiv;
+ return 0;
+}
+
+static int stm_clockdiv_set(void *data, u64 val)
+{
+ stm_set_ckdiv(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_clockdiv_fops, stm_clockdiv_show,
+ stm_clockdiv_set, "%llu\n");
+
+static int stm_masters_enable_show(void *data, u64 *val)
+{
+ *val = readl(STM_TER);
+ return 0;
+}
+
+static int stm_masters_enable_set(void *data, u64 val)
+{
+ stm_enable_src(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_masters_enable_fops, stm_masters_enable_show,
+ stm_masters_enable_set, "%08llx\n");
+
+static int stm_masters_modes_show(void *data, u64 *val)
+{
+ *val = stm_get_modes();
+ return 0;
+}
+
+static int stm_masters_modes_set(void *data, u64 val)
+{
+ stm_set_modes(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_masters_modes_fops, stm_masters_modes_show,
+ stm_masters_modes_set, "%08llx\n");
+
+/* Count # of free channels */
+static int stm_free_channels_show(void *data, u64 *val)
+{
+ *val = stm_nb_free_channels();
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_free_channels_fops, stm_free_channels_show,
+ NULL, "%lld\n");
+
+static __init int stm_init_debugfs(void)
+{
+ struct dentry *d_stm;
+
+ d_stm = debugfs_create_dir(STM_DEV_NAME, NULL);
+ if (!d_stm)
+ return -ENOMEM;
+
+ (void) debugfs_create_file("connection", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_connection_fops);
+ (void) debugfs_create_file("clockdiv", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_clockdiv_fops);
+ (void) debugfs_create_file("masters_enable", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_masters_enable_fops);
+ (void) debugfs_create_file("masters_modes", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_masters_modes_fops);
+ (void) debugfs_create_file("free_channels", S_IRUGO, d_stm,
+ NULL, &stm_free_channels_fops);
+ return 0;
+}
+fs_initcall(stm_init_debugfs);
+
+static struct platform_driver stm_driver = {
+ .probe = stm_probe,
+ .remove = __devexit_p(stm_remove),
+ .driver = {
+ .name = STM_DEV_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init stm_init(void)
+{
+ int retval;
+ dev_t dev;
+
+ stm_class = class_create(THIS_MODULE, STM_DEV_NAME);
+ if (IS_ERR(stm_class)) {
+ pr_err("stm: can't register stm class\n");
+ return PTR_ERR(stm_class);
+ }
+
+ retval = alloc_chrdev_region(&dev, 0, 1, STM_DEV_NAME);
+ if (retval) {
+ pr_err("stm: can't register character device\n");
+ class_destroy(stm_class);
+ return retval;
+ }
+ stm_major = MAJOR(dev);
+ return platform_driver_register(&stm_driver);
+}
+
+static void __exit stm_exit(void)
+{
+ platform_driver_unregister(&stm_driver);
+ unregister_chrdev_region(MKDEV(stm_major, 0), 1);
+ class_destroy(stm_class);
+}
+
+arch_initcall(stm_init); /* Init STM as early as possible, but after GPIO init */
+module_exit(stm_exit);
+
+MODULE_AUTHOR("Paul Ghaleb - ST Microelectronics");
+MODULE_AUTHOR("Pierre Peiffer - ST-Ericsson");
+MODULE_AUTHOR("Philippe Langlais - ST-Ericsson");
+MODULE_DESCRIPTION("System Trace Module driver");
+MODULE_ALIAS("stm");
+MODULE_ALIAS("stm-trace");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dabec556ebb..4c1b59aff9d 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -107,6 +107,7 @@ struct mmc_blk_data {
unsigned int part_curr;
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
+ struct device_attribute power_ro_lock_legacy;
int area_type;
};
@@ -167,6 +168,87 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_unlock(&open_lock);
}
+#define EXT_CSD_BOOT_WP_PWR_WP_TEXT "pwr_ro"
+#define EXT_CSD_BOOT_WP_PERM_WP_TEXT "perm_ro"
+#define EXT_CSD_BOOT_WP_WP_DISABLED_TEXT "rw"
+static ssize_t boot_partition_ro_lock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card = md->queue.card;
+ const char *out_text;
+
+ if (card->ext_csd.boot_ro_lock
+ & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
+ out_text = EXT_CSD_BOOT_WP_PERM_WP_TEXT;
+ else if (card->ext_csd.boot_ro_lock
+ & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
+ out_text = EXT_CSD_BOOT_WP_PWR_WP_TEXT;
+ else
+ out_text = EXT_CSD_BOOT_WP_WP_DISABLED_TEXT;
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", out_text);
+
+ return ret;
+}
+
+static ssize_t boot_partition_ro_lock_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ struct mmc_blk_data *md, *part_md;
+ struct mmc_card *card;
+ u8 set = 0;
+
+ md = mmc_blk_get(dev_to_disk(dev));
+ card = md->queue.card;
+
+ if (!strncmp(buf, EXT_CSD_BOOT_WP_PWR_WP_TEXT,
+ strlen(EXT_CSD_BOOT_WP_PWR_WP_TEXT)))
+ set = EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+ else if (!strncmp(buf, EXT_CSD_BOOT_WP_PERM_WP_TEXT,
+ strlen(EXT_CSD_BOOT_WP_PERM_WP_TEXT)))
+ set = EXT_CSD_BOOT_WP_B_PERM_WP_EN;
+
+ if (set) {
+ mmc_claim_host(card->host);
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BOOT_WP,
+ set,
+ card->ext_csd.part_time);
+ if (ret)
+			pr_err("Boot Partition Lock failed: %d\n", ret);
+ else
+ card->ext_csd.boot_ro_lock = set;
+
+ mmc_release_host(card->host);
+
+ if (!ret) {
+ pr_info("%s: Locking boot partition "
+ "%s",
+ md->disk->disk_name,
+ buf);
+ set_disk_ro(md->disk, 1);
+
+ list_for_each_entry(part_md, &md->part, part)
+ if (part_md->area_type ==
+ MMC_BLK_DATA_AREA_BOOT) {
+ pr_info("%s: Locking boot partition "
+ "%s",
+ part_md->disk->disk_name,
+ buf);
+ set_disk_ro(part_md->disk, 1);
+ }
+ }
+ }
+ ret = count;
+
+ mmc_blk_put(md);
+ return ret;
+}
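
For illustration, a user-space caller could drive the legacy attribute like this. The sysfs path is an assumption for a typical eMMC boot partition; the accepted strings are the EXT_CSD_BOOT_WP_*_TEXT values defined above:

    #include <stdio.h>

    static int lock_boot_partition_until_power_cycle(void)
    {
            /* Path assumed; the attribute name "ro_lock" is registered in mmc_add_disk() below */
            FILE *f = fopen("/sys/block/mmcblk0boot0/ro_lock", "w");

            if (!f)
                    return -1;
            fputs("pwr_ro\n", f); /* EXT_CSD_BOOT_WP_PWR_WP_TEXT */
            return fclose(f);
    }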
+
static ssize_t power_ro_lock_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1411,6 +1493,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
+ /*
+	 * To prevent a deadlock, the host must not be claimed when the
+	 * deferred resume work is flushed; therefore check whether the
+	 * host needs a resume before claiming it.
+ */
+ if (mmc_host_needs_resume(card->host))
+ mmc_resume_host_sync(card->host);
+
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
mmc_claim_host(card->host);
@@ -1654,9 +1744,12 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
- card->ext_csd.boot_ro_lockable)
+ card->ext_csd.boot_ro_lockable) {
device_remove_file(disk_to_dev(md->disk),
&md->power_ro_lock);
+ device_remove_file(disk_to_dev(md->disk),
+ &md->power_ro_lock_legacy);
+ }
/* Stop new requests from getting into the queue */
del_gendisk(md->disk);
@@ -1716,9 +1809,24 @@ static int mmc_add_disk(struct mmc_blk_data *md)
&md->power_ro_lock);
if (ret)
goto power_ro_lock_fail;
+
+ /* Legacy mode */
+ mode = S_IRUGO | S_IWUSR;
+
+ md->power_ro_lock_legacy.show = boot_partition_ro_lock_show;
+ md->power_ro_lock_legacy.store = boot_partition_ro_lock_store;
+ sysfs_attr_init(&md->power_ro_lock_legacy.attr);
+ md->power_ro_lock_legacy.attr.mode = mode;
+ md->power_ro_lock_legacy.attr.name = "ro_lock";
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->power_ro_lock_legacy);
+ if (ret)
+ goto power_ro_lock_fail_legacy;
}
return ret;
+power_ro_lock_fail_legacy:
+ device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
power_ro_lock_fail:
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 759714ed6be..6622f2e6e05 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -1253,6 +1253,130 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test)
return 0;
}
+
+/* helper function for various address alignment and sg length alignment */
+static int mmc_test_align_multi(struct mmc_test_card *test, bool do_write,
+ struct scatterlist *sg,
+ u32 *sizes, int sg_len, int offset)
+{
+ int ret, i;
+ unsigned int size;
+ u32 buf_off;
+ u32 sg_size;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+ size -= offset;
+ size -= size % 512;
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ for (i = 0, sg_size = 0;
+ i < sg_len && sg_size + sizes[i] < size; i++)
+ sg_size += sizes[i];
+
+ if (sg_size < size)
+ sizes[i-1] += size - sg_size;
+ sg_len = i;
+
+ sg_init_table(sg, sg_len);
+ for (i = 0, buf_off = offset; i < sg_len; i++) {
+ sg_set_buf(&sg[i], test->buffer + buf_off, sizes[i]);
+ buf_off += sizes[i];
+ }
+
+ ret = mmc_test_transfer(test, sg, sg_len, 0, size/512, 512, do_write);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_align_length_32(struct mmc_test_card *test, bool do_write)
+{
+ u32 sizes[] = {512, 32*1, 32*2, 32*3, 32*4, 32*5, 32*6, 32*7,
+ 32*8, 32*9, 32*10, 32*11, 32*12, 32*13, 2048};
+ struct scatterlist sg[ARRAY_SIZE(sizes)];
+
+ return mmc_test_align_multi(test, do_write, sg, sizes,
+ ARRAY_SIZE(sg), 0);
+}
+
+static int mmc_test_align_length_4(struct mmc_test_card *test, bool do_write)
+{
+ u32 sizes[] = {512, 4*1, 4*2, 4*3, 4*4, 4*5, 4*6, 4*7,
+ 4*8, 4*9, 520, 1040, 2080};
+ struct scatterlist sg[ARRAY_SIZE(sizes)];
+
+ return mmc_test_align_multi(test, do_write, sg, sizes,
+ ARRAY_SIZE(sg), 0);
+}
+
+static int mmc_test_align_length_4_write(struct mmc_test_card *test)
+{
+ bool do_write = true;
+ return mmc_test_align_length_4(test, do_write);
+}
+
+static int mmc_test_align_length_4_read(struct mmc_test_card *test)
+{
+ bool do_write = false;
+ return mmc_test_align_length_4(test, do_write);
+}
+
+static int mmc_test_align_length_32_write(struct mmc_test_card *test)
+{
+ bool do_write = true;
+ return mmc_test_align_length_32(test, do_write);
+}
+
+static int mmc_test_align_length_32_read(struct mmc_test_card *test)
+{
+ bool do_write = false;
+ return mmc_test_align_length_32(test, do_write);
+}
+
+/* helper function for testing address alignment */
+static int mmc_test_align_address(struct mmc_test_card *test, bool do_write,
+ u32 offset)
+{
+ u32 sizes[] = {512, 512, 1024, 1024, 2048};
+ struct scatterlist sg[ARRAY_SIZE(sizes)];
+
+ return mmc_test_align_multi(test, do_write, sg,
+ sizes, ARRAY_SIZE(sg), offset);
+}
+
+static int mmc_test_align_address_4_write(struct mmc_test_card *test)
+{
+ bool do_write = true;
+ return mmc_test_align_address(test, do_write, 4);
+}
+
+static int mmc_test_align_address_4_read(struct mmc_test_card *test)
+{
+ bool do_write = false;
+ return mmc_test_align_address(test, do_write, 4);
+}
+
+static int mmc_test_align_address_32_write(struct mmc_test_card *test)
+{
+ bool do_write = true;
+ return mmc_test_align_address(test, do_write, 32);
+}
+
+static int mmc_test_align_address_32_read(struct mmc_test_card *test)
+{
+ bool do_write = false;
+ return mmc_test_align_address(test, do_write, 32);
+}
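
All of the helpers above funnel into mmc_test_align_multi(), so a further case only needs its own size table. A minimal sketch with illustrative sizes:

    static int mmc_test_align_length_8_write(struct mmc_test_card *test)
    {
            u32 sizes[] = {512, 8, 16, 24, 32, 1024};
            struct scatterlist sg[ARRAY_SIZE(sizes)];

            return mmc_test_align_multi(test, true, sg, sizes,
                                        ARRAY_SIZE(sg), 0);
    }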
+
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
int ret;
@@ -2451,6 +2575,62 @@ static const struct mmc_test_case mmc_test_cases[] = {
},
{
+ .name = "4 bytes aligned sg-element length write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_length_4_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "4 bytes aligned sg-element length read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_length_4_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "32 bytes aligned sg-element length write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_length_32_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "32 bytes aligned sg-element length read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_length_32_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "4 bytes aligned sg-element address write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_address_4_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "4 bytes aligned sg-element address read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_address_4_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "32 bytes aligned sg-element address write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_align_address_32_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "32 bytes aligned sg-element address read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_align_address_32_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
.name = "Correct xfer_size at write (start failure)",
.run = mmc_test_xfersize_write,
},
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ba821fe70bc..9cce415f1ff 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2010,7 +2010,7 @@ void mmc_rescan(struct work_struct *work)
container_of(work, struct mmc_host, detect.work);
int i;
- if (host->rescan_disable)
+ if (host->rescan_disable || mmc_host_needs_resume(host))
return;
mmc_bus_get(host);
@@ -2273,8 +2273,13 @@ int mmc_suspend_host(struct mmc_host *host)
int err = 0;
cancel_delayed_work(&host->detect);
+ cancel_delayed_work_sync(&host->resume);
mmc_flush_scheduled_work();
+	/* Skip suspend if a deferred resume was scheduled but has not completed. */
+ if (mmc_host_needs_resume(host))
+ return 0;
+
err = mmc_cache_ctrl(host, 0);
if (err)
goto out;
@@ -2300,6 +2305,10 @@ int mmc_suspend_host(struct mmc_host *host)
mmc_release_host(host);
host->pm_flags = 0;
err = 0;
+ } else if (mmc_card_mmc(host->card) ||
+ mmc_card_sd(host->card)) {
+ host->pm_state |= MMC_HOST_DEFERRED_RESUME |
+ MMC_HOST_NEEDS_RESUME;
}
}
mmc_bus_put(host);
@@ -2321,6 +2330,12 @@ int mmc_resume_host(struct mmc_host *host)
{
int err = 0;
+ if (mmc_host_deferred_resume(host)) {
+ mmc_schedule_delayed_work(&host->resume,
+ msecs_to_jiffies(3000));
+ return 0;
+ }
+
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
if (!mmc_card_keep_power(host)) {
@@ -2355,6 +2370,24 @@ int mmc_resume_host(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_resume_host);
+void mmc_resume_work(struct work_struct *work)
+{
+ struct mmc_host *host =
+ container_of(work, struct mmc_host, resume.work);
+
+ host->pm_state &= ~MMC_HOST_DEFERRED_RESUME;
+ mmc_resume_host(host);
+ host->pm_state &= ~MMC_HOST_NEEDS_RESUME;
+
+ mmc_detect_change(host, 0);
+}
+
+void mmc_resume_host_sync(struct mmc_host *host)
+{
+ flush_delayed_work_sync(&host->resume);
+}
+EXPORT_SYMBOL(mmc_resume_host_sync);
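
Consumers that may issue requests while a deferred resume is pending follow the pattern introduced in mmc_blk_issue_rq() above; a sketch, where mmc_host_needs_resume() is the pm_state test used elsewhere in this patch:

    static void claim_host_after_deferred_resume(struct mmc_card *card)
    {
            /* Flush any pending deferred resume before claiming the host */
            if (mmc_host_needs_resume(card->host))
                    mmc_resume_host_sync(card->host);
            mmc_claim_host(card->host);
    }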
+
/* Do the card removal on suspend if card is assumed removeable
* Do that in pm notifier while userspace isn't yet frozen, so we will be able
to sync the card.
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 3bdafbca354..5796d2d85f4 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -59,6 +59,7 @@ static inline void mmc_delay(unsigned int ms)
void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
+void mmc_resume_work(struct work_struct *work);
int _mmc_detect_card_removed(struct mmc_host *host);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 91c84c7a182..d87d1152ea2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -330,6 +330,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
+ INIT_DELAYED_WORK(&host->resume, mmc_resume_work);
#ifdef CONFIG_PM
host->pm_notify.notifier_call = mmc_pm_notify;
#endif
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 032b84791a1..7df4119aaa2 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -19,6 +19,7 @@
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
+#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
@@ -46,6 +47,7 @@ static unsigned int fmax = 515633;
* struct variant_data - MMCI variant-specific quirks
* @clkreg: default value for MCICLOCK register
* @clkreg_enable: enable value for MMCICLOCK register
+ * @dma_sdio_req_ctrl: enable value for DMAREQCTL register for SDIO write
* @datalength_bits: number of bits in the MMCIDATALENGTH register
* @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
* is asserted (likewise for RX)
@@ -56,10 +58,13 @@ static unsigned int fmax = 515633;
* @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
* @pwrreg_powerup: power up value for MMCIPOWER register
* @signal_direction: input/out direction of bus signals can be indicated
+ * @non_power_of_2_blksize: true if block sizes can be other than powers of two
+ * @pwrreg_ctrl_power: true if bits in the MMCIPOWER register control the external power supply
*/
struct variant_data {
unsigned int clkreg;
unsigned int clkreg_enable;
+ unsigned int dma_sdio_req_ctrl;
unsigned int datalength_bits;
unsigned int fifosize;
unsigned int fifohalfsize;
@@ -68,6 +73,8 @@ struct variant_data {
bool blksz_datactrl16;
u32 pwrreg_powerup;
bool signal_direction;
+ bool non_power_of_2_blksize;
+ bool pwrreg_ctrl_power;
};
static struct variant_data variant_arm = {
@@ -75,6 +82,7 @@ static struct variant_data variant_arm = {
.fifohalfsize = 8 * 4,
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
+ .pwrreg_ctrl_power = true,
};
static struct variant_data variant_arm_extended_fifo = {
@@ -82,6 +90,7 @@ static struct variant_data variant_arm_extended_fifo = {
.fifohalfsize = 64 * 4,
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
+ .pwrreg_ctrl_power = true,
};
static struct variant_data variant_u300 = {
@@ -99,6 +108,7 @@ static struct variant_data variant_ux500 = {
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .dma_sdio_req_ctrl = MCI_ST_DPSM_DMAREQCTL,
.datalength_bits = 24,
.sdio = true,
.st_clkdiv = true,
@@ -111,15 +121,42 @@ static struct variant_data variant_ux500v2 = {
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .dma_sdio_req_ctrl = MCI_ST_DPSM_DMAREQCTL,
.datalength_bits = 24,
.sdio = true,
.st_clkdiv = true,
.blksz_datactrl16 = true,
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
+ .non_power_of_2_blksize = true,
};
/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ if (!data)
+ return 0;
+
+ if (!host->variant->non_power_of_2_blksize &&
+ !is_power_of_2(data->blksz)) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported block size (%d bytes)\n", data->blksz);
+ return -EINVAL;
+ }
+
+ if (data->sg->offset & 3) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported alginment (0x%x)\n", data->sg->offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
* This must be called with host->lock held
*/
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
@@ -228,6 +265,7 @@ static void mmci_stop_data(struct mmci_host *host)
writel(0, host->base + MMCIDATACTRL);
mmci_set_mask1(host, 0);
host->data = NULL;
+ host->datactrl_reg = 0;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
@@ -338,10 +376,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
host->dma_rx_channel = host->dma_tx_channel = NULL;
}
+static void mmci_dma_data_error(struct mmci_host *host, struct mmc_data *data)
+{
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(host->dma_current);
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
+ data->host_cookie = 0;
+}
+
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
- struct dma_chan *chan = host->dma_current;
+ struct dma_chan *chan;
enum dma_data_direction dir;
+
+ if (data->flags & MMC_DATA_READ) {
+ dir = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ dir = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
u32 status;
int i;
@@ -360,19 +421,12 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
* contiguous buffers. On TX, we'll get a FIFO underrun error.
*/
if (status & MCI_RXDATAAVLBLMASK) {
- dmaengine_terminate_all(chan);
- if (!data->error)
- data->error = -EIO;
- }
-
- if (data->flags & MMC_DATA_WRITE) {
- dir = DMA_TO_DEVICE;
- } else {
- dir = DMA_FROM_DEVICE;
+ data->error = -EIO;
+ mmci_dma_data_error(host, data);
}
if (!data->host_cookie)
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+ mmci_dma_unmap(host, data);
/*
* Use of DMA with scatter-gather is impossible.
@@ -382,16 +436,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
mmci_dma_release(host);
}
-}
-static void mmci_dma_data_error(struct mmci_host *host)
-{
- dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
- dmaengine_terminate_all(host->dma_current);
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
}
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
- struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+ struct dma_chan **dma_chan,
+ struct dma_async_tx_descriptor **dma_desc)
{
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
@@ -409,16 +462,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
enum dma_data_direction buffer_dirn;
int nr_sg;
- /* Check if next job is already prepared */
- if (data->host_cookie && !next &&
- host->dma_current && host->dma_desc_current)
- return 0;
-
- if (!next) {
- host->dma_current = NULL;
- host->dma_desc_current = NULL;
- }
-
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_DEV_TO_MEM;
buffer_dirn = DMA_FROM_DEVICE;
@@ -433,8 +476,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
if (!chan)
return -EINVAL;
- /* If less than or equal to the fifo size, don't bother with DMA */
- if (data->blksz * data->blocks <= variant->fifosize)
+ /*
+	 * If less than or equal to the fifo size, don't bother with DMA.
+	 * SDIO transfers may not be 4-byte aligned; fall back to PIO.
+ */
+ if (data->blksz * data->blocks <= variant->fifosize ||
+ (data->blksz * data->blocks) & 3)
return -EINVAL;
device = chan->device;
@@ -448,29 +495,42 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
if (!desc)
goto unmap_exit;
- if (next) {
- next->dma_chan = chan;
- next->dma_desc = desc;
- } else {
- host->dma_current = chan;
- host->dma_desc_current = desc;
- }
+ *dma_chan = chan;
+ *dma_desc = desc;
return 0;
unmap_exit:
- if (!next)
- dmaengine_terminate_all(chan);
dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
return -ENOMEM;
}
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ /* Check if next job is already prepared. */
+ if (host->dma_current && host->dma_desc_current)
+ return 0;
+
+	/* No job was prepared, so do it now. */
+ return __mmci_dma_prep_data(host, data, &host->dma_current,
+ &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ struct mmci_host_next *nd = &host->next_data;
+ return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host)
{
int ret;
struct mmc_data *data = host->data;
+ struct variant_data *variant = host->variant;
- ret = mmci_dma_prep_data(host, host->data, NULL);
+ ret = mmci_dma_prep_data(host, host->data);
if (ret)
return ret;
@@ -481,10 +541,15 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
dmaengine_submit(host->dma_desc_current);
dma_async_issue_pending(host->dma_current);
- datactrl |= MCI_DPSM_DMAENABLE;
+ host->datactrl_reg |= MCI_DPSM_DMAENABLE;
+
+ /* Some hardware versions need special flags for SDIO DMA write */
+ if (variant->sdio && host->mmc->card && mmc_card_sdio(host->mmc->card)
+ && (data->flags & MMC_DATA_WRITE))
+ host->datactrl_reg |= variant->dma_sdio_req_ctrl;
/* Trigger the DMA transfer */
- writel(datactrl, host->base + MMCIDATACTRL);
+ writel(host->datactrl_reg, host->base + MMCIDATACTRL);
/*
* Let the MMCI say when the data is ended and it's time
@@ -501,18 +566,14 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
struct mmci_host_next *next = &host->next_data;
if (data->host_cookie && data->host_cookie != next->cookie) {
- pr_warning("[%s] invalid cookie: data->host_cookie %d"
+ pr_err("[%s] invalid cookie: data->host_cookie %d"
" host->next_data.cookie %d\n",
__func__, data->host_cookie, host->next_data.cookie);
- data->host_cookie = 0;
+ BUG();
}
- if (!data->host_cookie)
- return;
-
host->dma_desc_current = next->dma_desc;
host->dma_current = next->dma_chan;
-
next->dma_desc = NULL;
next->dma_chan = NULL;
}
@@ -527,19 +588,18 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
if (!data)
return;
- if (data->host_cookie) {
- data->host_cookie = 0;
+ BUG_ON(data->host_cookie);
+
+ if (mmci_validate_data(host, data))
return;
- }
- /* if config for dma */
- if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
- ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
- if (mmci_dma_prep_data(host, data, nd))
- data->host_cookie = 0;
- else
- data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
- }
+ /*
+	 * Don't prepare DMA if there is no previous request
+	 * (is_first_req is set). Instead, prepare DMA while
+	 * the start command is being issued.
+ */
+ if (!is_first_req && !mmci_dma_prep_next(host, data))
+ data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -547,29 +607,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
{
struct mmci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
- struct dma_chan *chan;
- enum dma_data_direction dir;
- if (!data)
+ if (!data || !data->host_cookie)
return;
- if (data->flags & MMC_DATA_READ) {
- dir = DMA_FROM_DEVICE;
- chan = host->dma_rx_channel;
- } else {
- dir = DMA_TO_DEVICE;
- chan = host->dma_tx_channel;
- }
+ mmci_dma_unmap(host, data);
+ if (err) {
+ struct mmci_host_next *next = &host->next_data;
+ struct dma_chan *chan;
+ if (data->flags & MMC_DATA_READ)
+ chan = host->dma_rx_channel;
+ else
+ chan = host->dma_tx_channel;
+ dmaengine_terminate_all(chan);
- /* if config for dma */
- if (chan) {
- if (err)
- dmaengine_terminate_all(chan);
- if (data->host_cookie)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, dir);
- mrq->data->host_cookie = 0;
+ next->dma_desc = NULL;
+ next->dma_chan = NULL;
}
}
@@ -590,11 +644,20 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}
-static inline void mmci_dma_data_error(struct mmci_host *host)
+static inline void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host)
{
+ return -ENOSYS;
}
-static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static inline int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data)
{
return -ENOSYS;
}
@@ -604,10 +667,10 @@ static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datac
#endif
-static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
+static void mmci_setup_datactrl(struct mmci_host *host, struct mmc_data *data)
{
struct variant_data *variant = host->variant;
- unsigned int datactrl, timeout, irqmask;
+ unsigned int datactrl, timeout;
unsigned long long clks;
void __iomem *base;
int blksz_bits;
@@ -629,7 +692,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
writel(host->size, base + MMCIDATALENGTH);
blksz_bits = ffs(data->blksz) - 1;
- BUG_ON(1 << blksz_bits != data->blksz);
if (variant->blksz_datactrl16)
datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
@@ -641,14 +703,43 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
/* The ST Micro variants has a special bit to enable SDIO */
if (variant->sdio && host->mmc->card)
- if (mmc_card_sdio(host->mmc->card))
+ if (mmc_card_sdio(host->mmc->card)) {
+ /*
+			 * The ST Micro variants have a special bit
+			 * to enable SDIO.
+ */
+ u32 clk;
+
datactrl |= MCI_ST_DPSM_SDIOEN;
+ /*
+			 * The ST Micro variant needs clock H/W flow
+			 * control disabled for SDIO write transfer
+			 * sizes smaller than 8 bytes.
+ */
+ if ((host->size < 8) &&
+ (data->flags & MMC_DATA_WRITE))
+ clk = host->clk_reg & ~variant->clkreg_enable;
+ else
+ clk = host->clk_reg | variant->clkreg_enable;
+
+ mmci_write_clkreg(host, clk);
+ }
+ host->datactrl_reg = datactrl;
+ writel(datactrl, base + MMCIDATACTRL);
+}
+
+static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
+{
+ unsigned int irqmask;
+ struct variant_data *variant = host->variant;
+ void __iomem *base = host->base;
+
/*
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
*/
- if (!mmci_dma_start_data(host, datactrl))
+ if (!mmci_dma_start_data(host))
return;
/* IRQ mode, map the SG list for CPU reading/writing */
@@ -672,7 +763,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
irqmask = MCI_TXFIFOHALFEMPTYMASK;
}
- writel(datactrl, base + MMCIDATACTRL);
writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
mmci_set_mask1(host, irqmask);
}
@@ -699,6 +789,14 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
if (/*interrupt*/0)
c |= MCI_CPSM_INTERRUPT;
+ /*
+ * For levelshifters we must not use more than 25MHz when
+ * sending commands.
+ */
+ host->cclk_desired = host->cclk;
+ if (host->plat->ios_handler && (host->cclk_desired > 25000000))
+ mmci_set_clkreg(host, 25000000);
+
host->cmd = cmd;
writel(cmd->arg, base + MMCIARGUMENT);
@@ -715,8 +813,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
u32 remain, success;
/* Terminate the DMA transfer */
- if (dma_inprogress(host))
- mmci_dma_data_error(host);
+ if (dma_inprogress(host)) {
+ mmci_dma_data_error(host, data);
+ mmci_dma_unmap(host, data);
+ }
/*
* Calculate how far we are into the transfer. Note that
@@ -755,7 +855,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
if (status & MCI_DATAEND || data->error) {
if (dma_inprogress(host))
- mmci_dma_unmap(host, data);
+ mmci_dma_finalize(host, data);
mmci_stop_data(host);
if (!data->error)
@@ -789,15 +889,24 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
cmd->resp[3] = readl(base + MMCIRESPONSE3);
}
+ /*
+ * For levelshifters we might have decreased cclk to 25MHz when
+	 * sending commands, so we restore the frequency here.
+ */
+ if (host->plat->ios_handler && (host->cclk_desired > host->cclk))
+ mmci_set_clkreg(host, host->cclk_desired);
+
if (!cmd->data || cmd->error) {
- if (host->data) {
- /* Terminate the DMA transfer */
- if (dma_inprogress(host))
- mmci_dma_data_error(host);
- mmci_stop_data(host);
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host)) {
+ mmci_dma_data_error(host, host->mrq->data);
+ mmci_dma_unmap(host, host->mrq->data);
}
+ if (host->data)
+ mmci_stop_data(host);
mmci_request_end(host, cmd->mrq);
} else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ mmci_setup_datactrl(host, cmd->data);
mmci_start_data(host, cmd->data);
}
}
@@ -864,22 +973,6 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
count = min(remain, maxcnt);
/*
- * The ST Micro variant for SDIO transfer sizes
- * less then 8 bytes should have clock H/W flow
- * control disabled.
- */
- if (variant->sdio &&
- mmc_card_sdio(host->mmc->card)) {
- u32 clk;
- if (count < 8)
- clk = host->clk_reg & ~variant->clkreg_enable;
- else
- clk = host->clk_reg | variant->clkreg_enable;
-
- mmci_write_clkreg(host, clk);
- }
-
- /*
* SDIO especially may want to send something that is
* not divisible by 4 (as opposed to card sectors
* etc), and the FIFO only accept full 32-bit writes.
@@ -1032,13 +1125,12 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmci_host *host = mmc_priv(mmc);
unsigned long flags;
+ bool dmaprep_after_cmd = false;
WARN_ON(host->mrq != NULL);
- if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
- dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
- mrq->data->blksz);
- mrq->cmd->error = -EINVAL;
+ mrq->cmd->error = mmci_validate_data(host, mrq->data);
+ if (mrq->cmd->error) {
mmc_request_done(mmc, mrq);
return;
}
@@ -1049,14 +1141,28 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
- if (mrq->data)
+ if (mrq->data) {
+ dmaprep_after_cmd =
+ (host->variant->clkreg_enable &&
+ (mrq->data->flags & MMC_DATA_READ)) ||
+ !(mrq->data->flags & MMC_DATA_READ);
mmci_get_next_data(host, mrq->data);
-
- if (mrq->data && mrq->data->flags & MMC_DATA_READ)
- mmci_start_data(host, mrq->data);
+ if (mrq->data->flags & MMC_DATA_READ) {
+ mmci_setup_datactrl(host, mrq->data);
+ if (!dmaprep_after_cmd)
+ mmci_start_data(host, mrq->data);
+ }
+ }
mmci_start_command(host, mrq->cmd, 0);
+ if (mrq->data && dmaprep_after_cmd) {
+ mmci_dma_prep_data(host, mrq->data);
+
+ if (mrq->data->flags & MMC_DATA_READ)
+ mmci_start_data(host, mrq->data);
+ }
+
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -1178,6 +1284,21 @@ static int mmci_get_cd(struct mmc_host *mmc)
return status;
}
+static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ if (host->plat->ios_handler) {
+ pm_runtime_get_sync(mmc_dev(mmc));
+ ret = host->plat->ios_handler(mmc_dev(mmc), ios);
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+ }
+
+ return ret;
+}
+
static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
@@ -1194,6 +1315,7 @@ static const struct mmc_host_ops mmci_ops = {
.set_ios = mmci_set_ios,
.get_ro = mmci_get_ro,
.get_cd = mmci_get_cd,
+ .start_signal_voltage_switch = mmci_sig_volt_switch,
};
static int __devinit mmci_probe(struct amba_device *dev,
@@ -1321,6 +1443,9 @@ static int __devinit mmci_probe(struct amba_device *dev,
mmc->caps = plat->capabilities;
mmc->caps2 = plat->capabilities2;
+ /* We support these PM capabilities. */
+ mmc->pm_caps = MMC_PM_KEEP_POWER;
+
/*
* We can do SGIO
*/
@@ -1390,7 +1515,8 @@ static int __devinit mmci_probe(struct amba_device *dev,
}
if ((host->plat->status || host->gpio_cd != -ENOSYS)
- && host->gpio_cd_irq < 0)
+ && host->gpio_cd_irq < 0
+ && !(mmc->caps & MMC_CAP_NONREMOVABLE))
mmc->caps |= MMC_CAP_NEEDS_POLL;
ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
@@ -1503,6 +1629,75 @@ static int __devexit mmci_remove(struct amba_device *dev)
return 0;
}
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_PM_RUNTIME)
+static int mmci_save(struct amba_device *dev)
+{
+ struct mmc_host *mmc = amba_get_drvdata(dev);
+ unsigned long flags;
+ struct mmc_ios ios;
+ int ret = 0;
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+
+ /* Let the ios_handler act on a POWER_OFF to save power. */
+ if (host->plat->ios_handler) {
+ memcpy(&ios, &mmc->ios, sizeof(struct mmc_ios));
+ ios.power_mode = MMC_POWER_OFF;
+ ret = host->plat->ios_handler(mmc_dev(mmc),
+ &ios);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+		 * Make sure we do not get any interrupts once the clock
+		 * and the regulator are disabled, and clear the clock
+		 * and power registers.
+ */
+ writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIPOWER);
+ writel(0, host->base + MMCICLOCK);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ clk_disable(host->clk);
+ }
+
+ return ret;
+}
+
+static int mmci_restore(struct amba_device *dev)
+{
+ struct mmc_host *mmc = amba_get_drvdata(dev);
+ unsigned long flags;
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+
+ clk_enable(host->clk);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Restore registers and re-enable interrupts. */
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* Restore settings done by the ios_handler. */
+ if (host->plat->ios_handler)
+ host->plat->ios_handler(mmc_dev(mmc),
+ &mmc->ios);
+ }
+
+ return 0;
+}
+#endif
+
#ifdef CONFIG_SUSPEND
static int mmci_suspend(struct device *dev)
{
@@ -1511,12 +1706,11 @@ static int mmci_suspend(struct device *dev)
int ret = 0;
if (mmc) {
- struct mmci_host *host = mmc_priv(mmc);
-
ret = mmc_suspend_host(mmc);
if (ret == 0) {
pm_runtime_get_sync(dev);
- writel(0, host->base + MMCIMASK0);
+ mmci_save(adev);
+ amba_pclk_disable(adev);
}
}
@@ -1530,9 +1724,8 @@ static int mmci_resume(struct device *dev)
int ret = 0;
if (mmc) {
- struct mmci_host *host = mmc_priv(mmc);
-
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ amba_pclk_enable(adev);
+ mmci_restore(adev);
pm_runtime_put(dev);
ret = mmc_resume_host(mmc);
@@ -1542,8 +1735,43 @@ static int mmci_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM_RUNTIME
+static int mmci_runtime_suspend(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+ int ret = 0;
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
+ if (!variant->pwrreg_ctrl_power)
+ ret = mmci_save(adev);
+ }
+
+ return ret;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+ int ret = 0;
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
+ if (!variant->pwrreg_ctrl_power)
+ ret = mmci_restore(adev);
+ }
+
+ return ret;
+}
+#endif
+
static const struct dev_pm_ops mmci_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
+ SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
static struct amba_id mmci_ids[] = {
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index d437ccf62d6..5a17beafd05 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -60,6 +60,13 @@
#define MCI_ST_DPSM_RWMOD (1 << 10)
#define MCI_ST_DPSM_SDIOEN (1 << 11)
/* Control register extensions in the ST Micro Ux500 versions */
+/*
+ * DMA request control is required for write
+ * if transfer size is not 32 byte aligned.
+ * DMA request control is also needed if the total
+ * transfer size is 32 byte aligned but any of the
+ * sg element lengths are not aligned with 32 byte.
+ */
#define MCI_ST_DPSM_DMAREQCTL (1 << 12)
#define MCI_ST_DPSM_DBOOTMODEEN (1 << 13)
#define MCI_ST_DPSM_BUSYMODE (1 << 14)
@@ -179,8 +186,10 @@ struct mmci_host {
unsigned int mclk;
unsigned int cclk;
+ unsigned int cclk_desired;
u32 pwr_reg;
u32 clk_reg;
+ u32 datactrl_reg;
struct mmci_platform_data *plat;
struct variant_data *variant;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index cd3defb11ff..edbf830a23f 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -33,6 +33,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/crc32.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
@@ -144,6 +145,9 @@ struct smsc911x_data {
/* regulators */
struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
+
+ /* clock */
+ struct clk *fsmc_clk;
};
/* Easy access to information */
@@ -369,7 +373,7 @@ out:
}
/*
- * enable resources, currently just regulators.
+ * enable resources, regulators & clocks.
*/
static int smsc911x_enable_resources(struct platform_device *pdev)
{
@@ -379,9 +383,17 @@ static int smsc911x_enable_resources(struct platform_device *pdev)
ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies),
pdata->supplies);
- if (ret)
+ if (ret) {
netdev_err(ndev, "failed to enable regulators %d\n",
ret);
+ return ret;
+ }
+
+ if (pdata->fsmc_clk) {
+ ret = clk_enable(pdata->fsmc_clk);
+ if (ret < 0)
+ netdev_err(ndev, "failed to enable clock %d\n", ret);
+ }
return ret;
}
@@ -396,6 +408,8 @@ static int smsc911x_disable_resources(struct platform_device *pdev)
ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies),
pdata->supplies);
+ if (pdata->fsmc_clk)
+ clk_disable(pdata->fsmc_clk);
return ret;
}
@@ -418,9 +432,17 @@ static int smsc911x_request_resources(struct platform_device *pdev)
ret = regulator_bulk_get(&pdev->dev,
ARRAY_SIZE(pdata->supplies),
pdata->supplies);
- if (ret)
- netdev_err(ndev, "couldn't get regulators %d\n",
- ret);
+ if (ret) {
+ netdev_err(ndev, "couldn't get regulators %d\n", ret);
+ return ret;
+ }
+
+	/* Request the FSMC clock; carry on without it if absent */
+ pdata->fsmc_clk = clk_get(NULL, "fsmc");
+ if (IS_ERR(pdata->fsmc_clk)) {
+ netdev_warn(ndev, "couldn't get clock %d\n", ret);
+ pdata->fsmc_clk = NULL;
+ }
return ret;
}
@@ -436,6 +458,12 @@ static void smsc911x_free_resources(struct platform_device *pdev)
/* Free regulators */
regulator_bulk_free(ARRAY_SIZE(pdata->supplies),
pdata->supplies);
+
+ /* Free clock */
+ if (pdata->fsmc_clk) {
+ clk_put(pdata->fsmc_clk);
+ pdata->fsmc_clk = NULL;
+ }
}
/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
@@ -2343,6 +2371,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
unsigned int intcfg = 0;
int res_size, irq_flags;
int retval;
+ int to = 100;
pr_info("Driver version %s\n", SMSC_DRV_VERSION);
@@ -2419,6 +2448,17 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (pdata->config.shift)
pdata->ops = &shifted_smsc911x_ops;
+ /* poll the READY bit in PMT_CTRL. Any other access to the device is
+ * forbidden while this bit isn't set. Try for 100ms.
+ */
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+ udelay(1000);
+
+ if (to == 0) {
+ pr_err("Device not READY in 100ms, aborting\n");
+ goto out_0;
+ }
+
retval = smsc911x_init(dev);
if (retval < 0)
goto out_disable_resources;
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 99dc29f2f2f..56dfe432b74 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -307,4 +307,23 @@ config AB8500_BATTERY_THERM_ON_BATCTRL
help
Say Y to enable battery temperature measurements using
thermistor connected on BATCTRL ADC.
+
+config AB8500_9100_LI_ION_BATTERY
+ bool "Enable support of the 9100 Li-ion battery charging"
+ depends on AB8500_BM
+ help
+ Say Y to enable support of the 9100 Li-ion battery charging.
+
+config AB5500_BM
+ bool "AB5500 Battery Management Driver"
+ depends on AB5500_CORE && AB5500_GPADC && MACH_U5500
+ help
+ Say Y to include support for AB5500 battery management.
+
+config AB5500_BATTERY_THERM_ON_BATCTRL
+ bool "Thermistor connected on BATCTRL ADC"
+ depends on AB5500_BM
+ help
+ Say Y to enable battery temperature measurements using
+ thermistor connected on BATCTRL ADC.
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b6b243416c0..af27f1d08aa 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
+obj-$(CONFIG_AB5500_BM) += ab5500_charger.o ab5500_btemp.o ab5500_fg.o abx500_chargalg.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
diff --git a/drivers/power/ab5500_btemp.c b/drivers/power/ab5500_btemp.c
new file mode 100644
index 00000000000..3709299d6cb
--- /dev/null
+++ b/drivers/power/ab5500_btemp.c
@@ -0,0 +1,994 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Battery temperature driver for ab5500
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+
+#define BTEMP_THERMAL_LOW_LIMIT -10
+#define BTEMP_THERMAL_MED_LIMIT 0
+#define BTEMP_THERMAL_HIGH_LIMIT_62 62
+
+#define BTEMP_BATCTRL_CURR_SRC_7UA 7
+#define BTEMP_BATCTRL_CURR_SRC_15UA 15
+#define BTEMP_BATCTRL_CURR_SRC_20UA 20
+
+#define UART_MODE 0x0F
+#define BAT_CUR_SRC 0x1F
+#define RESIS_ID_MODE 0x03
+#define RESET 0x00
+#define ADOUT_10K_PULL_UP 0x07
+
+#define to_ab5500_btemp_device_info(x) container_of((x), \
+ struct ab5500_btemp, btemp_psy)
+
+/**
+ * struct ab5500_btemp_interrupts - ab5500 interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab5500_btemp_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab5500_btemp_events {
+ bool batt_rem;
+ bool usb_conn;
+};
+
+/**
+ * struct ab5500_btemp - ab5500 BTEMP device information
+ * @dev: Pointer to the structure device
+ * @chip_id: Chip-Id of the AB5500
+ * @curr_source: What current source we use, in uA
+ * @bat_temp: Battery temperature in degrees Celsius
+ * @prev_bat_temp: Last dispatched battery temperature
+ * @node: struct of type list_head
+ * @parent: Pointer to the struct ab5500
+ * @gpadc: Pointer to the struct gpadc
+ * @gpadc_auto: Pointer to the struct adc_auto_input
+ * @pdata: Pointer to the ab5500_btemp platform data
+ * @bat: Pointer to the ab5500_bm platform data
+ * @btemp_psy: Structure for BTEMP specific battery properties
+ * @events: Structure for information about events triggered
+ * @btemp_wq: Work queue for measuring the temperature periodically
+ * @btemp_periodic_work: Work for measuring the temperature periodically
+ */
+struct ab5500_btemp {
+ struct device *dev;
+ u8 chip_id;
+ int curr_source;
+ int bat_temp;
+ int prev_bat_temp;
+ struct list_head node;
+ struct ab5500 *parent;
+ struct ab5500_gpadc *gpadc;
+ struct adc_auto_input *gpadc_auto;
+ struct abx500_btemp_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct power_supply btemp_psy;
+ struct ab5500_btemp_events events;
+ struct workqueue_struct *btemp_wq;
+ struct delayed_work btemp_periodic_work;
+};
+
+/* BTEMP power supply properties */
+static enum power_supply_property ab5500_btemp_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static LIST_HEAD(ab5500_btemp_list);
+
+static int ab5500_btemp_bat_temp_trig(int mux);
+
+struct ab5500_btemp *ab5500_btemp_get(void)
+{
+ struct ab5500_btemp *di;
+ di = list_first_entry(&ab5500_btemp_list, struct ab5500_btemp, node);
+
+ return di;
+}
+
+/**
+ * ab5500_btemp_get_batctrl_temp() - get the temperature
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * Returns the batctrl temperature in millidegrees
+ */
+int ab5500_btemp_get_batctrl_temp(struct ab5500_btemp *di)
+{
+ return di->bat_temp * 1000;
+}
+
+/**
+ * ab5500_btemp_volt_to_res() - convert batctrl voltage to resistance
+ * @di: pointer to the ab5500_btemp structure
+ * @volt: measured batctrl/btemp_ball voltage
+ * @batctrl: batctrl/btemp_ball node
+ *
+ * This function returns the battery resistance that is
+ * derived from the BATCTRL/BTEMP_BALL voltage.
+ * Returns value in Ohms.
+ */
+static int ab5500_btemp_volt_to_res(struct ab5500_btemp *di,
+ int volt, bool batctrl)
+{
+ int rbs;
+
+ if (batctrl) {
+ /*
+ * If the battery has internal NTC, we use the current
+ * source to calculate the resistance, 7uA or 20uA
+ */
+ rbs = volt * 1000 / di->curr_source;
+ } else {
+ /*
+ * BTEMP_BALL is internally
+ * connected to 1.8V through a 10k resistor
+ */
+ rbs = (10000 * volt) / (1800 - volt);
+ }
+ return rbs;
+}
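+
+/*
+ * Worked example, using the formulas above: with the 7uA current
+ * source active, a measured BATCTRL voltage of 450mV gives
+ * rbs = 450 * 1000 / 7 = 64285 Ohm; in pull-up mode, 900mV on
+ * BTEMP_BALL gives rbs = (10000 * 900) / (1800 - 900) = 10000 Ohm.
+ */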
+
+/**
+ * ab5500_btemp_read_batctrl_voltage() - measure batctrl voltage
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * This function returns the voltage on BATCTRL. Returns value in mV.
+ */
+static int ab5500_btemp_read_batctrl_voltage(struct ab5500_btemp *di)
+{
+ int vbtemp;
+ static int prev;
+
+ vbtemp = ab5500_gpadc_convert(di->gpadc, BAT_CTRL);
+ if (vbtemp < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed, using previous value\n",
+ __func__);
+ return prev;
+ }
+ prev = vbtemp;
+ return vbtemp;
+}
+
+/**
+ * ab5500_btemp_curr_source_enable() - enable/disable batctrl current source
+ * @di: pointer to the ab5500_btemp structure
+ * @enable: enable or disable the current source
+ *
+ * Enable or disable the current sources for the BatCtrl AD channel
+ */
+static int ab5500_btemp_curr_source_enable(struct ab5500_btemp *di,
+ bool enable)
+{
+ int ret = 0;
+
+ /* Only do this for batteries with internal NTC */
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
+
+ dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART,
+ UART_MODE, RESIS_ID_MODE);
+ if (ret) {
+ dev_err(di->dev,
+ "%s failed setting resistance identification mode\n",
+ __func__);
+ return ret;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI,
+ BAT_CUR_SRC, BAT_CTRL_15U_ENA);
+ if (ret) {
+ dev_err(di->dev, "%s failed enabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+ } else if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
+ dev_dbg(di->dev, "Disable BATCTRL curr source\n");
+
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI,
+ BAT_CUR_SRC, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ goto disable_curr_source;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART,
+ UART_MODE, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling force comp\n",
+ __func__);
+ }
+ }
+ return ret;
+disable_curr_source:
+ /* Write 0 to the curr bits */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI,
+ BAT_CUR_SRC, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s failed disabling current source\n",
+ __func__);
+ }
+ return ret;
+}
+
+/**
+ * ab5500_btemp_get_batctrl_res() - get battery resistance
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * This function returns the battery pack identification resistance.
+ * Returns value in Ohms.
+ */
+static int ab5500_btemp_get_batctrl_res(struct ab5500_btemp *di)
+{
+ int ret;
+ int batctrl;
+ int res;
+
+ ret = ab5500_btemp_curr_source_enable(di, true);
+ /* TODO: This delay has to be optimised */
+ msleep(100);
+ if (ret) {
+ dev_err(di->dev, "%s curr source enable failed\n", __func__);
+ return ret;
+ }
+
+ batctrl = ab5500_btemp_read_batctrl_voltage(di);
+ res = ab5500_btemp_volt_to_res(di, batctrl, true);
+
+ ret = ab5500_btemp_curr_source_enable(di, false);
+ if (ret) {
+ dev_err(di->dev, "%s curr source disable failed\n", __func__);
+ return ret;
+ }
+
+ dev_dbg(di->dev, "%s batctrl: %d res: %d\n",
+ __func__, batctrl, res);
+
+ return res;
+}
+
+/**
+ * ab5500_btemp_get_btemp_ball_res() - get battery resistance
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * This function returns the battery pack identification
+ * resistance using resistor pull-up mode. Returns value in Ohms.
+ */
+static int ab5500_btemp_get_btemp_ball_res(struct ab5500_btemp *di)
+{
+ int ret, vntc;
+
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART,
+ UART_MODE, ADOUT_10K_PULL_UP);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to enable 10k pull up to Vadout\n");
+ return ret;
+ }
+
+ vntc = ab5500_gpadc_convert(di->gpadc, BTEMP_BALL);
+ if (vntc < 0) {
+ dev_err(di->dev, "%s gpadc conversion failed,"
+ " using previous value\n", __func__);
+ return vntc;
+ }
+
+ return ab5500_btemp_volt_to_res(di, vntc, false);
+}
+
+/**
+ * ab5500_btemp_temp_to_res() - temperature to resistance
+ * @di: pointer to the ab5500_btemp structure
+ * @tbl: pointer to the resistance to temperature table
+ * @tbl_size: size of the resistance to temperature table
+ * @temp: temperature to calculate the resistance from
+ *
+ * This function returns the battery resistance in ohms
+ * based on temperature.
+ */
+static int ab5500_btemp_temp_to_res(struct ab5500_btemp *di,
+ const struct abx500_res_to_temp *tbl, int tbl_size, int temp)
+{
+ int i, res;
+ /*
+ * Calculate the formula for the straight line
+ * Simple interpolation if we are within
+ * the resistance table limits, extrapolate
+ * if resistance is outside the limits.
+ */
+ if (temp < tbl[0].temp)
+ i = 0;
+ else if (temp >= tbl[tbl_size - 1].temp)
+ i = tbl_size - 2;
+ else {
+ i = 0;
+ while (!(temp >= tbl[i].temp &&
+ temp < tbl[i + 1].temp))
+ i++;
+ }
+
+ res = tbl[i].resist + ((tbl[i + 1].resist - tbl[i].resist) *
+ (temp - tbl[i].temp)) / (tbl[i + 1].temp - tbl[i].temp);
+ return res;
+}
+
+/**
+ * ab5500_btemp_temp_to_volt() - temperature to adc voltage
+ * @di: pointer to the ab5500_btemp structure
+ * @temp: temperature to calculate the voltage from
+ *
+ * This function returns the adc voltage in millivolts
+ * based on temperature.
+ */
+static int ab5500_btemp_temp_to_volt(struct ab5500_btemp *di, int temp)
+{
+ int res, id;
+
+ id = di->bat->batt_id;
+ res = ab5500_btemp_temp_to_res(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements,
+ temp);
+ /*
+ * BTEMP_BALL is internally connected to 1.8V
+ * through a 10k resistor
+ */
+ return (1800 * res) / (10000 + res);
+}
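+
+/*
+ * Worked example: res = 10000 Ohm maps to
+ * (1800 * 10000) / (10000 + 10000) = 900mV. Since the NTC voltage
+ * falls as temperature rises, ab5500_btemp_auto_temp() below uses
+ * the HIGH temperature limit for its minimum voltage and the LOW
+ * limit for its maximum.
+ */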
+
+/**
+ * ab5500_btemp_res_to_temp() - resistance to temperature
+ * @di: pointer to the ab5500_btemp structure
+ * @tbl: pointer to the resistance to temperature table
+ * @tbl_size: size of the resistance to temperature table
+ * @res: resistance to calculate the temperature from
+ *
+ * This function returns the battery temperature in degrees Celsius
+ * based on the NTC resistance.
+ */
+static int ab5500_btemp_res_to_temp(struct ab5500_btemp *di,
+ const struct abx500_res_to_temp *tbl, int tbl_size, int res)
+{
+ int i, temp;
+ /*
+ * Calculate the formula for the straight line
+ * Simple interpolation if we are within
+ * the resistance table limits, extrapolate
+ * if resistance is outside the limits.
+ */
+ if (res > tbl[0].resist)
+ i = 0;
+ else if (res <= tbl[tbl_size - 1].resist)
+ i = tbl_size - 2;
+ else {
+ i = 0;
+ while (!(res <= tbl[i].resist &&
+ res > tbl[i + 1].resist))
+ i++;
+ }
+
+ temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
+ (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
+ return temp;
+}
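+
+/*
+ * Worked example of the interpolation above, with a hypothetical
+ * table {{-20, 100000}, {0, 50000}, {40, 10000}} (deg C, Ohm):
+ * for res = 30000 the segment i = 1 is selected and
+ * temp = 0 + (40 - 0) * (30000 - 50000) / (10000 - 50000) = 20.
+ */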
+
+/**
+ * ab5500_btemp_measure_temp() - measure battery temperature
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * Returns battery temperature (on success) else the previous temperature
+ */
+static int ab5500_btemp_measure_temp(struct ab5500_btemp *di)
+{
+ int temp, rbat;
+ u8 id;
+
+ id = di->bat->batt_id;
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+ id != BATTERY_UNKNOWN && !di->bat->auto_trig)
+ rbat = ab5500_btemp_get_batctrl_res(di);
+ else
+ rbat = ab5500_btemp_get_btemp_ball_res(di);
+
+ if (rbat < 0) {
+ dev_err(di->dev, "%s failed to get resistance\n", __func__);
+ /*
+ * Return out-of-range temperature so that
+ * charging is stopped
+ */
+ return BTEMP_THERMAL_LOW_LIMIT;
+ }
+
+ temp = ab5500_btemp_res_to_temp(di,
+ di->bat->bat_type[id].r_to_t_tbl,
+ di->bat->bat_type[id].n_temp_tbl_elements, rbat);
+ dev_dbg(di->dev, "Battery temperature is %d\n", temp);
+
+ return temp;
+}
+
+/**
+ * ab5500_btemp_id() - Identify the connected battery
+ * @di: pointer to the ab5500_btemp structure
+ *
+ * This function will try to identify the battery by reading the ID
+ * resistor. Some brands use a combined ID resistor with a NTC resistor to
+ * both be able to identify and to read the temperature of it.
+ */
+static int ab5500_btemp_id(struct ab5500_btemp *di)
+{
+ int res;
+ u8 i;
+
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
+ di->bat->batt_id = BATTERY_UNKNOWN;
+
+ res = ab5500_btemp_get_batctrl_res(di);
+ if (res < 0) {
+ dev_err(di->dev, "%s get batctrl res failed\n", __func__);
+ return -ENXIO;
+ }
+
+ /* BATTERY_UNKNOWN is defined on position 0, skip it! */
+ for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) {
+ if ((res <= di->bat->bat_type[i].resis_high) &&
+ (res >= di->bat->bat_type[i].resis_low)) {
+ dev_dbg(di->dev, "Battery detected on %s"
+ " low %d < res %d < high: %d"
+ " index: %d\n",
+ di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL ?
+ "BATCTRL" : "BATTEMP",
+ di->bat->bat_type[i].resis_low, res,
+ di->bat->bat_type[i].resis_high, i);
+
+ di->bat->batt_id = i;
+ break;
+ }
+ }
+
+ if (di->bat->batt_id == BATTERY_UNKNOWN) {
+ dev_warn(di->dev, "Battery identified as unknown"
+ ", resistance %d Ohm\n", res);
+ return -ENXIO;
+ }
+
+ /*
+ * We only have to change current source if the
+ * detected type is Type 1, else we use the 7uA source
+ */
+ if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+ di->bat->batt_id == 1) {
+ dev_dbg(di->dev, "Set BATCTRL current source to 15uA\n");
+ di->curr_source = BTEMP_BATCTRL_CURR_SRC_15UA;
+ }
+
+ return di->bat->batt_id;
+}
+
+/**
+ * ab5500_btemp_periodic_work() - Measuring the temperature periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work function for measuring the temperature periodically
+ */
+static void ab5500_btemp_periodic_work(struct work_struct *work)
+{
+ struct ab5500_btemp *di = container_of(work,
+ struct ab5500_btemp, btemp_periodic_work.work);
+
+ di->bat_temp = ab5500_btemp_measure_temp(di);
+
+ if (di->bat_temp != di->prev_bat_temp) {
+ di->prev_bat_temp = di->bat_temp;
+ power_supply_changed(&di->btemp_psy);
+ }
+ di->bat->temp_now = di->bat_temp;
+
+ if (!di->bat->auto_trig) {
+ /* Check for temperature limits */
+ if (di->bat_temp <= BTEMP_THERMAL_LOW_LIMIT) {
+ dev_err(di->dev,
+ "battery temp less than lower threshold\n");
+ power_supply_changed(&di->btemp_psy);
+ } else if (di->bat_temp >= BTEMP_THERMAL_HIGH_LIMIT_62) {
+ dev_err(di->dev,
+ "battery temp greater than max threshold\n");
+ power_supply_changed(&di->btemp_psy);
+ }
+
+ /* Schedule a new measurement */
+ if (di->events.usb_conn)
+ queue_delayed_work(di->btemp_wq,
+ &di->btemp_periodic_work,
+ round_jiffies(di->bat->interval_charging * HZ));
+ else
+ queue_delayed_work(di->btemp_wq,
+ &di->btemp_periodic_work,
+ round_jiffies(di->bat->interval_not_charging * HZ));
+ } else {
+ /* Schedule a new measurement */
+ queue_delayed_work(di->btemp_wq,
+ &di->btemp_periodic_work,
+ round_jiffies(di->bat->interval_charging * HZ));
+ }
+}
+
+/**
+ * ab5500_btemp_batt_removal_handler() - battery removal detected
+ * @irq: interrupt number
+ * @_di: void pointer that has to address of ab5500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_btemp_batt_removal_handler(int irq, void *_di)
+{
+ struct ab5500_btemp *di = _di;
+ dev_err(di->dev, "Battery removal detected!\n");
+
+ di->events.batt_rem = true;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_btemp_batt_attach_handler() - battery insertion detected
+ * @irq: interrupt number
+ * @_di: void pointer that has to address of ab5500_btemp
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_btemp_batt_attach_handler(int irq, void *_di)
+{
+ struct ab5500_btemp *di = _di;
+ dev_err(di->dev, "Battery attached!\n");
+
+ di->events.batt_rem = false;
+ power_supply_changed(&di->btemp_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_btemp_periodic() - Periodic temperature measurements
+ * @di: pointer to the ab5500_btemp structure
+ * @enable: enable or disable periodic temperature measurements
+ *
+ * Starts or stops periodic temperature measurements. Periodic measurements
+ * should only be done when a charger is connected.
+ */
+static void ab5500_btemp_periodic(struct ab5500_btemp *di,
+ bool enable)
+{
+ dev_dbg(di->dev, "Enable periodic temperature measurements: %d\n",
+ enable);
+
+ if (enable)
+ queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0);
+ else
+ cancel_delayed_work_sync(&di->btemp_periodic_work);
+}
+
+/**
+ * ab5500_btemp_get_property() - get the btemp properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the btemp
+ * properties by reading the sysfs files.
+ * online: presence of the battery
+ * present: presence of the battery
+ * technology: battery technology
+ * temp: battery temperature
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_btemp_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab5500_btemp *di;
+
+ di = to_ab5500_btemp_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (di->events.batt_rem)
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = di->bat->bat_type[di->bat->batt_id].name;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ if (di->bat->batt_id == BATTERY_UNKNOWN)
+ /*
+ * In case the battery is not identified, it is assumed that
+ * we are running from a power supply and, since no monitoring
+ * is done for it, a nominal temperature is hardcoded.
+ */
+ val->intval = 250;
+ else
+ val->intval = di->bat_temp * 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ab5500_btemp_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab5500_btemp *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab5500_btemp_device_info(psy);
+
+ /*
+ * For all psy where this driver's name
+ * appears in any supplied_to
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_USB:
+ /* USB disconnected */
+ if (!ret.intval && di->events.usb_conn) {
+ di->events.usb_conn = false;
+ if (di->bat->auto_trig)
+ ab5500_btemp_periodic(di,
+ false);
+ }
+ /* USB connected */
+ else if (ret.intval && !di->events.usb_conn) {
+ di->events.usb_conn = true;
+ if (di->bat->auto_trig)
+ ab5500_btemp_periodic(di, true);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ab5500_btemp_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is pointing to the function pointer external_power_changed
+ * of the structure power_supply.
+ * This function gets executed when there is a change in the external power
+ * supply to the btemp.
+ */
+static void ab5500_btemp_external_power_changed(struct power_supply *psy)
+{
+ struct ab5500_btemp *di = to_ab5500_btemp_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->btemp_psy, ab5500_btemp_get_ext_psy_data);
+}
+
+/* ab5500 btemp driver interrupts and their respective isr */
+static struct ab5500_btemp_interrupts ab5500_btemp_irq[] = {
+ {"BATT_REMOVAL", ab5500_btemp_batt_removal_handler},
+ {"BATT_ATTACH", ab5500_btemp_batt_attach_handler},
+};
+
+static int ab5500_btemp_bat_temp_trig(int mux)
+{
+ struct ab5500_btemp *di = ab5500_btemp_get();
+ int temp = ab5500_btemp_measure_temp(di);
+
+ if (temp < (BTEMP_THERMAL_LOW_LIMIT + 1)) {
+ dev_err(di->dev,
+ "battery temp less than lower threshold (-10 deg C)\n");
+ power_supply_changed(&di->btemp_psy);
+ } else if (temp > (BTEMP_THERMAL_HIGH_LIMIT_62 - 1)) {
+ dev_err(di->dev, "battery temp greater than max threshold\n");
+ power_supply_changed(&di->btemp_psy);
+ }
+
+ return 0;
+}
+
+static int ab5500_btemp_auto_temp(struct ab5500_btemp *di)
+{
+ struct adc_auto_input *auto_ip;
+ int ret = 0;
+
+ auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL);
+ if (!auto_ip) {
+ dev_err(di->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ auto_ip->mux = BTEMP_BALL;
+ auto_ip->freq = MS500;
+ auto_ip->min = ab5500_btemp_temp_to_volt(di,
+ BTEMP_THERMAL_HIGH_LIMIT_62);
+ auto_ip->max = ab5500_btemp_temp_to_volt(di,
+ BTEMP_THERMAL_LOW_LIMIT);
+ auto_ip->auto_adc_callback = ab5500_btemp_bat_temp_trig;
+ di->gpadc_auto = auto_ip;
+ ret = ab5500_gpadc_convert_auto(di->gpadc, di->gpadc_auto);
+ if (ret)
+ dev_err(di->dev,
+ "failed to set auto trigger for battery temp\n");
+ return ret;
+}
+
+#if defined(CONFIG_PM)
+static int ab5500_btemp_resume(struct platform_device *pdev)
+{
+ struct ab5500_btemp *di = platform_get_drvdata(pdev);
+
+ if (di->events.usb_conn)
+ ab5500_btemp_periodic(di, true);
+
+ return 0;
+}
+
+static int ab5500_btemp_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab5500_btemp *di = platform_get_drvdata(pdev);
+
+ if (di->events.usb_conn)
+ ab5500_btemp_periodic(di, false);
+
+ return 0;
+}
+#else
+#define ab5500_btemp_suspend NULL
+#define ab5500_btemp_resume NULL
+#endif
+
+static int __devexit ab5500_btemp_remove(struct platform_device *pdev)
+{
+ struct ab5500_btemp *di = platform_get_drvdata(pdev);
+ int i, irq;
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ /* Delete the work queue */
+ destroy_workqueue(di->btemp_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->btemp_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di->gpadc_auto);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab5500_btemp_probe(struct platform_device *pdev)
+{
+ int irq, i, ret = 0;
+ struct abx500_bm_plat_data *plat_data;
+
+ struct ab5500_btemp *di =
+ kzalloc(sizeof(struct ab5500_btemp), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab5500_gpadc_get("ab5500-adc.0");
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->btemp;
+ di->bat = plat_data->battery;
+
+ /* get btemp specific platform data */
+ if (!di->pdata) {
+ dev_err(di->dev, "no btemp platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* BTEMP supply */
+ di->btemp_psy.name = "ab5500_btemp";
+ di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->btemp_psy.properties = ab5500_btemp_props;
+ di->btemp_psy.num_properties = ARRAY_SIZE(ab5500_btemp_props);
+ di->btemp_psy.get_property = ab5500_btemp_get_property;
+ di->btemp_psy.supplied_to = di->pdata->supplied_to;
+ di->btemp_psy.num_supplicants = di->pdata->num_supplicants;
+ di->btemp_psy.external_power_changed =
+ ab5500_btemp_external_power_changed;
+
+
+ /* Create a work queue for the btemp */
+ di->btemp_wq =
+ create_singlethread_workqueue("ab5500_btemp_wq");
+ if (di->btemp_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for measuring temperature periodically */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+ ab5500_btemp_periodic_work);
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(di->dev);
+ if (ret < 0) {
+ dev_err(di->dev, "failed to get chip ID\n");
+ goto free_btemp_wq;
+ }
+ di->chip_id = ret;
+ dev_dbg(di->dev, "ab5500 CID is: 0x%02x\n",
+ di->chip_id);
+
+ /* Identify the battery */
+ if (ab5500_btemp_id(di) < 0)
+ dev_warn(di->dev, "failed to identify the battery\n");
+
+ /* Measure temperature once initially */
+ di->bat_temp = ab5500_btemp_measure_temp(di);
+ di->bat->temp_now = di->bat_temp;
+
+ /* Register BTEMP power supply class */
+ ret = power_supply_register(di->dev, &di->btemp_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register BTEMP psy\n");
+ goto free_btemp_wq;
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_btemp_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab5500_btemp_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab5500_btemp_irq[i].name, di);
+
+ if (ret) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ , ab5500_btemp_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab5500_btemp_irq[i].name, irq, ret);
+ }
+
+ if (!di->bat->auto_trig) {
+ /* Schedule monitoring work only if battery type is known */
+ if (di->bat->batt_id != BATTERY_UNKNOWN)
+ queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0);
+ } else {
+ ret = ab5500_btemp_auto_temp(di);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to register auto trigger for battery temp\n");
+ goto free_irq;
+ }
+ }
+
+ platform_set_drvdata(pdev, di);
+ list_add_tail(&di->node, &ab5500_btemp_list);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+free_irq:
+ power_supply_unregister(&di->btemp_psy);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name);
+ free_irq(irq, di);
+ }
+free_btemp_wq:
+ destroy_workqueue(di->btemp_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab5500_btemp_driver = {
+ .probe = ab5500_btemp_probe,
+ .remove = __devexit_p(ab5500_btemp_remove),
+ .suspend = ab5500_btemp_suspend,
+ .resume = ab5500_btemp_resume,
+ .driver = {
+ .name = "ab5500-btemp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_btemp_init(void)
+{
+ return platform_driver_register(&ab5500_btemp_driver);
+}
+
+static void __exit ab5500_btemp_exit(void)
+{
+ platform_driver_unregister(&ab5500_btemp_driver);
+}
+
+subsys_initcall_sync(ab5500_btemp_init);
+module_exit(ab5500_btemp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab5500-btemp");
+MODULE_DESCRIPTION("AB5500 battery temperature driver");
diff --git a/drivers/power/ab5500_charger.c b/drivers/power/ab5500_charger.c
new file mode 100644
index 00000000000..b90c51a4f31
--- /dev/null
+++ b/drivers/power/ab5500_charger.c
@@ -0,0 +1,1820 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Charger driver for AB5500
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500/ux500_chargalg.h>
+#include <linux/usb/otg.h>
+
+/* Charger constants */
+#define NO_PW_CONN 0
+#define USB_PW_CONN 2
+
+/* HW failure constants */
+#define VBUS_CH_NOK 0x0A
+#define VBUS_OVV_TH 0x06
+
+/* AB5500 Charger constants */
+#define AB5500_USB_LINK_STATUS 0x78
+#define CHARGER_REV_SUP 0x10
+#define SW_EOC 0x40
+#define USB_CHAR_DET 0x02
+#define VBUS_RISING 0x20
+#define VBUS_FALLING 0x40
+#define USB_LINK_UPDATE 0x02
+#define USB_CH_TH_PROT_LOW 0x02
+#define USB_CH_TH_PROT_HIGH 0x01
+#define USB_ID_HOST_DET_ENA_MASK 0x02
+#define USB_ID_HOST_DET_ENA 0x02
+#define USB_ID_DEVICE_DET_ENA_MASK 0x01
+#define USB_ID_DEVICE_DET_ENA 0x01
+#define CHARGER_ISET_IN_1_1A 0x0C
+#define LED_ENABLE 0x01
+#define RESET 0x00
+#define SSW_ENABLE_REBOOT 0x80
+#define SSW_REBOOT_EN 0x40
+#define SSW_CONTROL_AUTOC 0x04
+#define SSW_PSEL_480S 0x00
+
+/* UsbLineStatus register - usb types */
+enum ab5500_charger_link_status {
+ USB_STAT_NOT_CONFIGURED,
+ USB_STAT_STD_HOST_NC,
+ USB_STAT_STD_HOST_C_NS,
+ USB_STAT_STD_HOST_C_S,
+ USB_STAT_HOST_CHG_NM,
+ USB_STAT_HOST_CHG_HS,
+ USB_STAT_HOST_CHG_HS_CHIRP,
+ USB_STAT_DEDICATED_CHG,
+ USB_STAT_ACA_RID_A,
+ USB_STAT_ACA_RID_B,
+ USB_STAT_ACA_RID_C_NM,
+ USB_STAT_ACA_RID_C_HS,
+ USB_STAT_ACA_RID_C_HS_CHIRP,
+ USB_STAT_HM_IDGND,
+ USB_STAT_RESERVED,
+ USB_STAT_NOT_VALID_LINK,
+};
+
+enum ab5500_usb_state {
+ AB5500_BM_USB_STATE_RESET_HS, /* HighSpeed Reset */
+ AB5500_BM_USB_STATE_RESET_FS, /* FullSpeed/LowSpeed Reset */
+ AB5500_BM_USB_STATE_CONFIGURED,
+ AB5500_BM_USB_STATE_SUSPEND,
+ AB5500_BM_USB_STATE_RESUME,
+ AB5500_BM_USB_STATE_MAX,
+};
+
+/* VBUS input current limits supported in AB5500 in mA */
+#define USB_CH_IP_CUR_LVL_0P05 50
+#define USB_CH_IP_CUR_LVL_0P09 98
+#define USB_CH_IP_CUR_LVL_0P19 193
+#define USB_CH_IP_CUR_LVL_0P29 290
+#define USB_CH_IP_CUR_LVL_0P38 380
+#define USB_CH_IP_CUR_LVL_0P45 450
+#define USB_CH_IP_CUR_LVL_0P5 500
+#define USB_CH_IP_CUR_LVL_0P6 600
+#define USB_CH_IP_CUR_LVL_0P7 700
+#define USB_CH_IP_CUR_LVL_0P8 800
+#define USB_CH_IP_CUR_LVL_0P9 900
+#define USB_CH_IP_CUR_LVL_1P0 1000
+#define USB_CH_IP_CUR_LVL_1P1 1100
+#define USB_CH_IP_CUR_LVL_1P3 1300
+#define USB_CH_IP_CUR_LVL_1P4 1400
+#define USB_CH_IP_CUR_LVL_1P5 1500
+
+#define to_ab5500_charger_usb_device_info(x) container_of((x), \
+ struct ab5500_charger, usb_chg)
+
+/**
+ * struct ab5500_charger_interrupts - ab5500 interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab5500_charger_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct ab5500_charger_info {
+ int charger_connected;
+ int charger_online;
+ int charger_voltage;
+ int cv_active;
+ bool wd_expired;
+};
+
+struct ab5500_charger_event_flags {
+ bool usb_thermal_prot;
+ bool vbus_ovv;
+ bool usbchargernotok;
+ bool vbus_collapse;
+};
+
+struct ab5500_charger_usb_state {
+ bool usb_changed;
+ int usb_current;
+ enum ab5500_usb_state state;
+ spinlock_t usb_lock;
+};
+
+/**
+ * struct ab5500_charger - ab5500 Charger device information
+ * @dev: Pointer to the structure device
+ * @chip_id: Chip-Id of the ab5500
+ * @max_usb_in_curr: Max USB charger input current
+ * @vbus_detected: VBUS detected
+ * @vbus_detected_start:
+ * VBUS detected during startup
+ * @parent: Pointer to the struct ab5500
+ * @gpadc: Pointer to the struct gpadc
+ * @pdata: Pointer to the ab5500_charger platform data
+ * @bat: Pointer to the ab5500_bm platform data
+ * @flags: Structure for information about events triggered
+ * @usb_state: Structure for usb stack information
+ * @usb_chg: USB charger power supply
+ * @usb: Structure that holds the USB charger properties
+ * @charger_wq: Work queue for the IRQs and checking HW state
+ * @check_hw_failure_work: Work for checking HW state
+ * @check_usbchgnotok_work: Work for checking USB charger not ok status
+ * @detect_usb_type_work: Work for detecting the USB type connected
+ * @usb_link_status_work: Work for checking the new USB link status
+ * @usb_state_changed_work: Work for checking USB state
+ * @check_usb_thermal_prot_work:
+ * Work for checking USB thermal status
+ * @otg: pointer to struct otg_transceiver, used to
+ * notify the current during a standard host
+ * charger.
+ * @nb: structure of type notifier_block, which has
+ * a function pointer referenced by the usb driver.
+ */
+struct ab5500_charger {
+ struct device *dev;
+ u8 chip_id;
+ int max_usb_in_curr;
+ bool vbus_detected;
+ bool vbus_detected_start;
+ struct ab5500 *parent;
+ struct ab5500_gpadc *gpadc;
+ struct abx500_charger_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct ab5500_charger_event_flags flags;
+ struct ab5500_charger_usb_state usb_state;
+ struct ux500_charger usb_chg;
+ struct ab5500_charger_info usb;
+ struct workqueue_struct *charger_wq;
+ struct delayed_work check_hw_failure_work;
+ struct delayed_work check_usbchgnotok_work;
+ struct work_struct detect_usb_type_work;
+ struct work_struct usb_link_status_work;
+ struct work_struct usb_state_changed_work;
+ struct work_struct check_usb_thermal_prot_work;
+ struct otg_transceiver *otg;
+ struct notifier_block nb;
+};
+
+/* USB properties */
+static enum power_supply_property ab5500_charger_usb_props[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+/**
+ * ab5500_charger_get_vbus_voltage() - get vbus voltage
+ * @di: pointer to the ab5500_charger structure
+ *
+ * This function returns the vbus voltage.
+ * Returns vbus voltage (on success)
+ */
+static int ab5500_charger_get_vbus_voltage(struct ab5500_charger *di)
+{
+ int vch;
+
+ /* Only measure voltage if the charger is connected */
+ if (di->usb.charger_connected) {
+ vch = ab5500_gpadc_convert(di->gpadc, VBUS_V);
+ if (vch < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ vch = 0;
+ }
+ return vch;
+}
+
+/**
+ * ab5500_charger_get_usb_current() - get usb charger current
+ * @di: pointer to the ab5500_charger structure
+ *
+ * This function returns the usb charger current.
+ * Returns usb current (on success) and error code on failure
+ */
+static int ab5500_charger_get_usb_current(struct ab5500_charger *di)
+{
+ int ich;
+
+ /* Only measure current if the charger is online */
+ if (di->usb.charger_online) {
+ ich = ab5500_gpadc_convert(di->gpadc, USB_CHARGER_C);
+ if (ich < 0)
+ dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ } else {
+ ich = 0;
+ }
+ return ich;
+}
+
+/**
+ * ab5500_charger_detect_chargers() - Detect the connected chargers
+ * @di: pointer to the ab5500_charger structure
+ *
+ * Returns the type of charger connected.
+ * For USB it will not mean we can actually charge from it
+ * but that there is a USB cable connected that we have to
+ * identify. This is used during startup when we don't get
+ * interrupts of the charger detection
+ *
+ * Returns an integer value, that means,
+ * NO_PW_CONN no power supply is connected
+ * USB_PW_CONN if the USB power supply is connected
+ */
+static int ab5500_charger_detect_chargers(struct ab5500_charger *di)
+{
+ int result = NO_PW_CONN;
+ int ret;
+ u8 val;
+ /* Check for USB charger */
+ /*
+ * TODO: since there is no status register, validate by
+ * reading the IT source registers
+ */
+ ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_IT,
+ AB5500_IT_SOURCE8, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return ret;
+ }
+
+ if (val & VBUS_RISING)
+ result |= USB_PW_CONN;
+ else if (val & VBUS_FALLING)
+ result = NO_PW_CONN;
+
+ return result;
+}
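+
+/*
+ * Example: an IT source 8 value of 0x20 (VBUS_RISING) makes this
+ * function return USB_PW_CONN, while 0x40 (VBUS_FALLING) makes it
+ * return NO_PW_CONN.
+ */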
+
+/**
+ * ab5500_charger_max_usb_curr() - get the max curr for the USB type
+ * @di: pointer to the ab5500_charger structure
+ * @link_status: the identified USB type
+ *
+ * Get the maximum current that is allowed to be drawn from the host
+ * based on the USB type.
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab5500_charger_max_usb_curr(struct ab5500_charger *di,
+ enum ab5500_charger_link_status link_status)
+{
+ int ret = 0;
+
+ switch (link_status) {
+ case USB_STAT_STD_HOST_NC:
+ case USB_STAT_STD_HOST_C_NS:
+ case USB_STAT_STD_HOST_C_S:
+ dev_dbg(di->dev, "USB Type - Standard host is "
+ "detected through USB driver\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+ break;
+ case USB_STAT_HOST_CHG_HS_CHIRP:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ case USB_STAT_HOST_CHG_HS:
+ case USB_STAT_ACA_RID_C_HS:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
+ break;
+ case USB_STAT_ACA_RID_A:
+ /*
+ * Dedicated charger level minus maximum current accessory
+ * can consume (300mA). Closest level is 1100mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1;
+ break;
+ case USB_STAT_ACA_RID_B:
+ /*
+ * Dedicated charger level minus 120mA (20mA for ACA and
+ * 100mA for potential accessory). Closest level is 1300mA
+ */
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
+ break;
+ case USB_STAT_DEDICATED_CHG:
+ case USB_STAT_HOST_CHG_NM:
+ case USB_STAT_ACA_RID_C_HS_CHIRP:
+ case USB_STAT_ACA_RID_C_NM:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+ break;
+ case USB_STAT_RESERVED:
+ /*
+ * This state is used to indicate that VBUS has dropped below
+ * the detection level 4 times in a row. This is due to the
+ * charger output current being set too high, making the charger
+ * voltage collapse. This has to be propagated through to
+ * chargalg. This is done using the property
+ * POWER_SUPPLY_PROP_CURRENT_AVG = 1
+ */
+ di->flags.vbus_collapse = true;
+ dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
+ "VBUS has collapsed\n");
+ ret = -1;
+ break;
+ case USB_STAT_HM_IDGND:
+ case USB_STAT_NOT_CONFIGURED:
+ case USB_STAT_NOT_VALID_LINK:
+ dev_err(di->dev, "USB Type - Charging not allowed\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ default:
+ dev_err(di->dev, "USB Type - Unknown\n");
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ ret = -ENXIO;
+ break;
+ }
+
+ dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d\n",
+ link_status, di->max_usb_in_curr);
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_read_usb_type() - read the type of usb connected
+ * @di: pointer to the ab5500_charger structure
+ *
+ * Detect the type of the plugged USB
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab5500_charger_read_usb_type(struct ab5500_charger *di)
+{
+ int ret;
+ u8 val;
+
+ ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_USB,
+ AB5500_USB_LINE_STATUS, &val);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return ret;
+ }
+
+ /* get the USB type */
+ val = (val & AB5500_USB_LINK_STATUS) >> 3;
+ ret = ab5500_charger_max_usb_curr(di,
+ (enum ab5500_charger_link_status) val);
+
+ return ret;
+}
+
+static int ab5500_charger_voltage_map[] = {
+ 3500,
+ 3525,
+ 3550,
+ 3575,
+ 3600,
+ 3625,
+ 3650,
+ 3675,
+ 3700,
+ 3725,
+ 3750,
+ 3775,
+ 3800,
+ 3825,
+ 3850,
+ 3875,
+ 3900,
+ 3925,
+ 3950,
+ 3975,
+ 4000,
+ 4025,
+ 4050,
+ 4060,
+ 4070,
+ 4080,
+ 4090,
+ 4100,
+ 4110,
+ 4120,
+ 4130,
+ 4140,
+ 4150,
+ 4160,
+ 4170,
+ 4180,
+ 4190,
+ 4200,
+ 4210,
+ 4220,
+ 4230,
+ 4240,
+ 4250,
+ 4260,
+ 4270,
+ 4280,
+ 4290,
+ 4300,
+ 4310,
+ 4320,
+ 4330,
+ 4340,
+ 4350,
+ 4360,
+ 4370,
+ 4380,
+ 4390,
+ 4400,
+ 4410,
+ 4420,
+ 4430,
+ 4440,
+ 4450,
+ 4460,
+ 4470,
+ 4480,
+ 4490,
+ 4500,
+ 4510,
+ 4520,
+ 4530,
+ 4540,
+ 4550,
+ 4560,
+ 4570,
+ 4580,
+ 4590,
+ 4600,
+};
+
+/*
+ * This array maps the raw hex value to charger current used by the ab5500
+ * Values taken from the AB5500 product specification manual
+ */
+static int ab5500_charger_current_map[] = {
+ 100 ,
+ 200 ,
+ 300 ,
+ 400 ,
+ 500 ,
+ 600 ,
+ 700 ,
+ 800 ,
+ 900 ,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1500,
+ 1500,
+};
+
+static int ab5500_icsr_current_map[] = {
+ 50,
+ 93,
+ 193,
+ 290,
+ 380,
+ 450,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1300,
+ 1400,
+ 1500,
+};
+
+static int ab5500_cvrec_voltage_map[] = {
+ 3300,
+ 3325,
+ 3350,
+ 3375,
+ 3400,
+ 3425,
+ 3450,
+ 3475,
+ 3500,
+ 3525,
+ 3550,
+ 3575,
+ 3600,
+ 3625,
+ 3650,
+ 3675,
+ 3700,
+ 3725,
+ 3750,
+ 3775,
+ 3800,
+ 3825,
+ 3850,
+ 3875,
+ 3900,
+ 3925,
+ 4000,
+ 4025,
+ 4050,
+ 4075,
+ 4100,
+ 4125,
+ 4150,
+ 4175,
+ 4200,
+ 4225,
+ 4250,
+ 4275,
+ 4300,
+ 4325,
+ 4350,
+ 4375,
+ 4400,
+ 4425,
+ 4450,
+ 4475,
+ 4500,
+ 4525,
+ 4550,
+ 4575,
+ 4600,
+};
+
+static int ab5500_cvrec_voltage_to_regval(int voltage)
+{
+ int i;
+
+ /* Special case for voltage below 3.3V */
+ if (voltage < ab5500_cvrec_voltage_map[0])
+ return 0;
+
+ for (i = 1; i < ARRAY_SIZE(ab5500_cvrec_voltage_map); i++) {
+ if (voltage < ab5500_cvrec_voltage_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_cvrec_voltage_map) - 1;
+ if (voltage == ab5500_cvrec_voltage_map[i])
+ return i;
+ else
+ return -1;
+}
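+
+/*
+ * Worked example: ab5500_cvrec_voltage_to_regval(4025) returns 27,
+ * the index of the 4025mV entry (note the table steps from 3925
+ * straight to 4000).
+ */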
+
+static int ab5500_voltage_to_regval(int voltage)
+{
+ int i;
+
+ /* Special case for voltage below 3.3V */
+ if (voltage < ab5500_charger_voltage_map[0])
+ return 0;
+
+ for (i = 1; i < ARRAY_SIZE(ab5500_charger_voltage_map); i++) {
+ if (voltage < ab5500_charger_voltage_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_charger_voltage_map) - 1;
+ if (voltage == ab5500_charger_voltage_map[i])
+ return i;
+ else
+ return -1;
+}
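+
+/*
+ * Worked example: the map runs in 25mV steps from 3500 to 4050
+ * (index 0-22) and in 10mV steps up to 4600, so
+ * ab5500_voltage_to_regval(4200) returns 37, while a voltage
+ * above 4600 returns -1.
+ */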
+
+static int ab5500_icsr_curr_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab5500_icsr_current_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab5500_icsr_current_map); i++) {
+ if (curr < ab5500_icsr_current_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_icsr_current_map) - 1;
+ if (curr == ab5500_icsr_current_map[i])
+ return i;
+ else
+ return -1;
+}
+
+static int ab5500_current_to_regval(int curr)
+{
+ int i;
+
+ if (curr < ab5500_charger_current_map[0])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab5500_charger_current_map); i++) {
+ if (curr < ab5500_charger_current_map[i])
+ return i - 1;
+ }
+
+ /* If not last element, return error */
+ i = ARRAY_SIZE(ab5500_charger_current_map) - 1;
+ if (curr == ab5500_charger_current_map[i])
+ return i;
+ else
+ return -1;
+}
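+
+/*
+ * Worked example: ab5500_current_to_regval(900) returns 8 (the
+ * 900mA entry), while 1600mA lies above the last 1500mA entry
+ * and returns -1.
+ */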
+
+/**
+ * ab5500_charger_get_usb_cur() - get usb current
+ * @di: pointer to the ab5500_charger structure
+ *
+ * The usb stack provides the maximum current that can be drawn from
+ * the standard usb host. This will be in mA.
+ * This function converts current in mA to a value that can be written
+ * to the register. Returns -1 if charging is not allowed
+ */
+static int ab5500_charger_get_usb_cur(struct ab5500_charger *di)
+{
+ switch (di->usb_state.usb_current) {
+ case 50:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ break;
+ case 100:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+ break;
+ case 200:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P19;
+ break;
+ case 300:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P29;
+ break;
+ case 400:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P38;
+ break;
+ case 500:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+ break;
+ default:
+ di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * ab5500_charger_set_vbus_in_curr() - set VBUS input current limit
+ * @di: pointer to the ab5500_charger structure
+ * @ich_in: charger input current limit
+ *
+ * Sets the current that can be drawn from the USB host
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_set_vbus_in_curr(struct ab5500_charger *di,
+ int ich_in)
+{
+ int ret;
+ int input_curr_index;
+ int min_value;
+
+ /* We should always use the lowest current limit */
+ min_value = min(di->bat->chg_params->usb_curr_max, ich_in);
+
+ input_curr_index = ab5500_icsr_curr_to_regval(min_value);
+ if (input_curr_index < 0) {
+ dev_err(di->dev, "VBUS input current limit too high\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_CHG,
+ AB5500_ICSR, input_curr_index);
+ if (ret)
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+
+ return ret;
+}
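+
+/*
+ * Example: with chg_params->usb_curr_max = 500, a request for 800mA
+ * is clamped to 500mA, for which ab5500_icsr_curr_to_regval()
+ * returns index 6 (the USB_CH_IP_CUR_LVL_0P5 level).
+ */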
+
+/**
+ * ab5500_charger_usb_en() - enable usb charging
+ * @charger: pointer to the ux500_charger structure
+ * @enable: enable/disable flag
+ * @vset: charging voltage
+ * @ich_out: charger output current
+ *
+ * Enable/Disable USB charging and turns on/off the charging led respectively.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_usb_en(struct ux500_charger *charger,
+ int enable, int vset, int ich_out)
+{
+ int ret;
+ int volt_index;
+ int curr_index;
+
+ struct ab5500_charger *di = to_ab5500_charger_usb_device_info(charger);
+
+ if (enable) {
+ /* Check if USB is connected */
+ if (!di->usb.charger_connected) {
+ dev_err(di->dev, "USB charger not connected\n");
+ return -ENXIO;
+ }
+
+ /* Enable USB charging */
+ dev_dbg(di->dev, "Enable USB: %dmV %dmA\n", vset, ich_out);
+
+ volt_index = ab5500_voltage_to_regval(vset);
+ curr_index = ab5500_current_to_regval(ich_out);
+
+ /* ChVoltLevel: max voltage up to which battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_VSRC, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* current that can be drawn from the usb */
+ ret = ab5500_charger_set_vbus_in_curr(di, ich_out);
+ if (ret) {
+ dev_err(di->dev, "%s setting icsr failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* ChOutputCurrentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_OCSRV, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /*
+ * Battery voltage when charging should be resumed after
+ * completion of charging
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CVREC,
+ ab5500_cvrec_voltage_to_regval(
+ di->bat->bat_type[di->bat->batt_id].recharge_vol));
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ /*
+ * Battery temperature:
+ * Input to the TBDATA register corresponds to the battery
+ * temperature (temp being in multiples of 2).
+ * In order to obtain the value to be written to this reg,
+ * divide the temperature obtained from gpadc by 2.
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_TBDATA,
+ di->bat->temp_now / 2);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* If success power on charging LED indication */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_LEDT, LED_ENABLE);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /*
+ * Register DCIOCURRENT is part of the charging watchdog
+ * re-kick sequence, hence it has to be written irrespective
+ * of usb charging.
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_DCIOCURRENT,
+ RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ di->usb.charger_online = 1;
+ } else {
+ /* ChVoltLevel: max voltage up to which battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_VSRC, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ /* USBChInputCurr: current that can be drawn from the usb */
+ ret = ab5500_charger_set_vbus_in_curr(di, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s resetting icsr failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ /* If success power off charging LED indication */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_LEDT, RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ di->usb.charger_online = 0;
+ di->usb.wd_expired = false;
+ dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
+ }
+ power_supply_changed(&di->usb_chg.psy);
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_watchdog_kick() - kick charger watchdog
+ * @charger: pointer to the ux500_charger structure
+ *
+ * Kick charger watchdog
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_watchdog_kick(struct ux500_charger *charger)
+{
+ int ret;
+ struct ab5500_charger *di;
+ int volt_index, curr_index;
+ u8 value = 0;
+
+ /* TODO: update */
+ if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab5500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_STARTUP,
+ AB5500_MCB, &value);
+ if (ret)
+ dev_err(di->dev, "Failed to read!\n");
+
+ value = value | (SSW_ENABLE_REBOOT | SSW_REBOOT_EN |
+ SSW_CONTROL_AUTOC | SSW_PSEL_480S);
+ ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_STARTUP,
+ AB5500_MCB, value);
+ if (ret)
+ dev_err(di->dev, "Failed to kick WD!\n");
+
+ volt_index = ab5500_voltage_to_regval(
+ di->bat->bat_type[di->bat->batt_id].normal_vol_lvl);
+ curr_index = ab5500_current_to_regval(di->max_usb_in_curr);
+
+ /* ChVoltLevel: max voltage up to which battery can be charged */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_VSRC, (u8) volt_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ /* current that can be drawn from the usb */
+ ret = ab5500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+ if (ret) {
+ dev_err(di->dev, "%s setting icsr failed %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+
+ /* ChOutputCurrentLevel: protected output current */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_OCSRV, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ /*
+ * Battery voltage when charging should be resumed after
+ * completion of charging
+ */
+ /* Charger_Vrechar[5:0] = '4.025 V' */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CVREC,
+ ab5500_cvrec_voltage_to_regval(
+ di->bat->bat_type[di->bat->batt_id].recharge_vol));
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+ /*
+ * Battery temperature:
+ * Input to the TBDATA register corresponds to the battery
+ * temperature (temp being in multiples of 2).
+ * In order to obtain the value to be written to this reg,
+ * divide the temperature obtained from gpadc by 2.
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_TBDATA,
+ di->bat->temp_now / 2);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+ /*
+ * Register DCIOCURRENT is part of the charging watchdog
+ * re-kick sequence, hence it has to be written irrespective
+ * of usb charging.
+ */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_DCIOCURRENT,
+ RESET);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_update_charger_current() - update charger current
+ * @charger: pointer to the ux500_charger structure
+ * @ich_out: charger output current
+ *
+ * Update the charger output current for the specified charger
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab5500_charger_update_charger_current(struct ux500_charger *charger,
+ int ich_out)
+{
+ int ret = 0;
+ int curr_index;
+ struct ab5500_charger *di;
+
+ if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+ di = to_ab5500_charger_usb_device_info(charger);
+ else
+ return -ENXIO;
+
+ curr_index = ab5500_current_to_regval(ich_out);
+ if (curr_index < 0) {
+ dev_err(di->dev,
+ "Charger current too high, "
+ "charging not started\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_CHG,
+ AB5500_OCSRV, (u8) curr_index);
+ if (ret) {
+ dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * ab5500_charger_check_hw_failure_work() - check main charger failure
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the main charger status
+ */
+static void ab5500_charger_check_hw_failure_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, check_hw_failure_work.work);
+
+ /* Check if the status bits for HW failure is still active */
+ if (di->flags.vbus_ovv) {
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_USB_PHY_STATUS,
+ &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return;
+ }
+ if (!(reg_value & VBUS_OVV_TH)) {
+ di->flags.vbus_ovv = false;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ }
+ /* If we still have a failure, schedule a new check */
+ if (di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, round_jiffies(HZ));
+ }
+}
+
+/**
+ * ab5500_charger_detect_usb_type_work() - work to detect USB type
+ * @work: Pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab5500_charger_detect_usb_type_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, detect_usb_type_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check if a charger is
+ * connected by reading the status register
+ */
+ ret = ab5500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+ }
+}
+
+/**
+ * ab5500_charger_usb_link_status_work() - work on USB link status change
+ * @work: pointer to the work_struct structure
+ *
+ * Detect the type of USB charger plugged when the USB link status changes
+ */
+static void ab5500_charger_usb_link_status_work(struct work_struct *work)
+{
+ int ret;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, usb_link_status_work);
+
+ /*
+ * Since we can't be sure that the events are received
+ * synchronously, we have to check if a charger is
+ * connected by reading the status register
+ */
+ ret = ab5500_charger_detect_chargers(di);
+ if (ret < 0)
+ return;
+
+ if (!(ret & USB_PW_CONN)) {
+ di->vbus_detected = 0;
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ } else {
+ di->vbus_detected = 1;
+ ret = ab5500_charger_read_usb_type(di);
+ if (!ret) {
+ /* Update maximum input current */
+ ret = ab5500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ di->usb.charger_connected = 1;
+ power_supply_changed(&di->usb_chg.psy);
+ } else if (ret == -ENXIO) {
+ /* No valid charger type detected */
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ }
+}
+
+static void ab5500_charger_usb_state_changed_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, usb_state_changed_work);
+
+ if (!di->vbus_detected)
+ return;
+
+ spin_lock_irqsave(&di->usb_state.usb_lock, flags);
+ di->usb_state.usb_changed = false;
+ spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);
+
+ /*
+ * Wait for some time until updates arrive from the USB stack
+ * and the enumeration negotiations are completed
+ */
+ msleep(250);
+
+ if (di->usb_state.usb_changed)
+ return;
+
+ dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
+ __func__, di->usb_state.state, di->usb_state.usb_current);
+
+ switch (di->usb_state.state) {
+ case AB5500_BM_USB_STATE_RESET_HS:
+ case AB5500_BM_USB_STATE_RESET_FS:
+ case AB5500_BM_USB_STATE_SUSPEND:
+ case AB5500_BM_USB_STATE_MAX:
+ di->usb.charger_connected = 0;
+ power_supply_changed(&di->usb_chg.psy);
+ break;
+
+ case AB5500_BM_USB_STATE_RESUME:
+ /*
+ * On a suspend->resume transition there should be a delay
+ * of 1 sec before charging is enabled
+ */
+ msleep(1000);
+ /* Intentional fall through */
+ case AB5500_BM_USB_STATE_CONFIGURED:
+ /*
+ * USB is configured, enable charging with the charging
+ * input current obtained from USB driver
+ */
+ if (!ab5500_charger_get_usb_cur(di)) {
+ /* Update maximum input current */
+ ret = ab5500_charger_set_vbus_in_curr(di,
+ di->max_usb_in_curr);
+ if (ret)
+ return;
+
+ di->usb.charger_connected = 1;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ab5500_charger_check_usbchargernotok_work() - check USB chg not ok status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB charger Not OK status
+ */
+static void ab5500_charger_check_usbchargernotok_work(struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+ bool prev_status;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, check_usbchgnotok_work.work);
+
+ /* Check if the status bit for usbchargernotok is still active */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_CHGFSM_CHARGER_DETECT, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return;
+ }
+ prev_status = di->flags.usbchargernotok;
+
+ if (reg_value & VBUS_CH_NOK) {
+ di->flags.usbchargernotok = true;
+ /* Check again in 1sec */
+ queue_delayed_work(di->charger_wq,
+ &di->check_usbchgnotok_work, HZ);
+ } else {
+ di->flags.usbchargernotok = false;
+ di->flags.vbus_collapse = false;
+ }
+
+ if (prev_status != di->flags.usbchargernotok)
+ power_supply_changed(&di->usb_chg.psy);
+}
+
+/**
+ * ab5500_charger_check_usb_thermal_prot_work() - check usb thermal status
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the USB thermal prot status
+ */
+static void ab5500_charger_check_usb_thermal_prot_work(
+ struct work_struct *work)
+{
+ int ret;
+ u8 reg_value;
+
+ struct ab5500_charger *di = container_of(work,
+ struct ab5500_charger, check_usb_thermal_prot_work);
+
+ /* Check if the status bit for usb_thermal_prot is still active */
+ /* TODO: Interrupt source reg 15 bit 4 */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_CHGFSM_USB_BTEMP_CURR_LIM, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ab5500 read failed\n", __func__);
+ return;
+ }
+ if (reg_value & USB_CH_TH_PROT_LOW || reg_value & USB_CH_TH_PROT_HIGH)
+ di->flags.usb_thermal_prot = true;
+ else
+ di->flags.usb_thermal_prot = false;
+
+ power_supply_changed(&di->usb_chg.psy);
+}
+
+/**
+ * ab5500_charger_vbusdetf_handler() - VBUS falling detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_vbusdetf_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS falling detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_vbusdetr_handler() - VBUS rising detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_vbusdetr_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ di->vbus_detected = true;
+ dev_dbg(di->dev, "VBUS rising detected\n");
+ queue_work(di->charger_wq, &di->detect_usb_type_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usblinkstatus_handler() - USB link status has changed
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_usblinkstatus_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "USB link status changed\n");
+
+ if (!di->usb.charger_online)
+ queue_work(di->charger_wq, &di->usb_link_status_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usbchthprotr_handler() - Die temp is above usb charger
+ * thermal protection threshold
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_usbchthprotr_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev,
+ "Die temp above USB charger thermal protection threshold\n");
+ queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usbchargernotokr_handler() - USB charger not ok detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_usbchargernotokr_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "Not allowed USB charger detected\n");
+ queue_delayed_work(di->charger_wq, &di->check_usbchgnotok_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_chwdexp_handler() - Charger watchdog expired
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_chwdexp_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "Charger watchdog expired\n");
+
+ /*
+ * The charger that was online when the watchdog expired
+ * needs to be restarted for charging to start again
+ */
+ if (di->usb.charger_online) {
+ di->usb.wd_expired = true;
+ power_supply_changed(&di->usb_chg.psy);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_vbusovv_handler() - VBUS overvoltage detected
+ * @irq: interrupt number
+ * @_di: pointer to the ab5500_charger structure
+ *
+ * Returns IRQ status (IRQ_HANDLED)
+ */
+static irqreturn_t ab5500_charger_vbusovv_handler(int irq, void *_di)
+{
+ struct ab5500_charger *di = _di;
+
+ dev_dbg(di->dev, "VBUS overvoltage detected\n");
+ di->flags.vbus_ovv = true;
+ power_supply_changed(&di->usb_chg.psy);
+
+ /* Schedule a new HW failure check */
+ queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ab5500_charger_usb_get_property() - get the usb properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the usb
+ * properties by reading the sysfs files.
+ * USB properties are online, present and voltage.
+ * online: usb charging is in progress or not
+ * present: presence of the usb
+ * voltage: vbus voltage
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab5500_charger_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab5500_charger *di;
+
+ di = to_ab5500_charger_usb_device_info(psy_to_ux500_charger(psy));
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (di->flags.usbchargernotok)
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else if (di->usb.wd_expired)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (di->flags.usb_thermal_prot)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (di->flags.vbus_ovv)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = di->usb.charger_online;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = di->usb.charger_connected;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ di->usb.charger_voltage = ab5500_charger_get_vbus_voltage(di);
+ val->intval = di->usb.charger_voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ab5500_charger_get_usb_current(di) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ /*
+ * This property is used to indicate when VBUS has collapsed
+ * due to too high output current from the USB charger
+ */
+ if (di->flags.vbus_collapse)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * ab5500_charger_init_hw_registers() - Set up charger related registers
+ * @di: pointer to the ab5500_charger structure
+ *
+ * Set up USB charger detection, over current protection for reverse
+ * supply and SW end-of-charge at flat-current detection
+ */
+static int ab5500_charger_init_hw_registers(struct ab5500_charger *di)
+{
+ int ret = 0;
+
+ /* Enable ID Host and Device detection */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_USB_OTG_CTRL,
+ USB_ID_HOST_DET_ENA_MASK, USB_ID_HOST_DET_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to enable usb charger detection\n");
+ goto out;
+ }
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_USB, AB5500_USB_OTG_CTRL,
+ USB_ID_DEVICE_DET_ENA_MASK, USB_ID_DEVICE_DET_ENA);
+ if (ret) {
+ dev_err(di->dev, "failed to enable usb charger detection\n");
+ goto out;
+ }
+
+ /* Over current protection for reverse supply */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CREVS, CHARGER_REV_SUP,
+ CHARGER_REV_SUP);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to enable over current protection for reverse supply\n");
+ goto out;
+ }
+
+ /* Enable SW EOC at flatcurrent detection */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_CHG, AB5500_CCTRL, SW_EOC, SW_EOC);
+ if (ret) {
+ dev_err(di->dev,
+ "failed to enable end of charge at flatcurrent detection\n");
+ goto out;
+ }
+out:
+ return ret;
+}
+
+/*
+ * ab5500 charger driver interrupts and their respective isr
+ */
+static struct ab5500_charger_interrupts ab5500_charger_irq[] = {
+ {"VBUS_FALLING", ab5500_charger_vbusdetf_handler},
+ {"VBUS_RISING", ab5500_charger_vbusdetr_handler},
+ {"USB_LINK_UPDATE", ab5500_charger_usblinkstatus_handler},
+ {"USB_CH_TH_PROTECTION", ab5500_charger_usbchthprotr_handler},
+ {"USB_CH_NOT_OK", ab5500_charger_usbchargernotokr_handler},
+ {"OVV", ab5500_charger_vbusovv_handler},
+ /* TODO: Interrupt missing, will be available in cut 2 */
+ /*{"CHG_SW_TIMER_OUT", ab5500_charger_chwdexp_handler},*/
+};
+
+static int ab5500_charger_usb_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *power)
+{
+ struct ab5500_charger *di =
+ container_of(nb, struct ab5500_charger, nb);
+ enum ab5500_usb_state bm_usb_state;
+ unsigned mA = *((unsigned *)power);
+
+ if (event != USB_EVENT_VBUS) {
+ dev_dbg(di->dev, "not a standard host, returning\n");
+ return NOTIFY_DONE;
+ }
+
+ /* TODO: The state is fabricated here. See if the charger really needs
+ * the USB state or if mA is enough
+ */
+ if ((di->usb_state.usb_current == 2) && (mA > 2))
+ bm_usb_state = AB5500_BM_USB_STATE_RESUME;
+ else if (mA == 0)
+ bm_usb_state = AB5500_BM_USB_STATE_RESET_HS;
+ else if (mA == 2)
+ bm_usb_state = AB5500_BM_USB_STATE_SUSPEND;
+ else if (mA >= 8) /* 8, 100, 500 */
+ bm_usb_state = AB5500_BM_USB_STATE_CONFIGURED;
+ else /* Should never occur */
+ bm_usb_state = AB5500_BM_USB_STATE_RESET_FS;
+
+ dev_dbg(di->dev, "%s usb_state: 0x%02x mA: %d\n",
+ __func__, bm_usb_state, mA);
+
+ spin_lock(&di->usb_state.usb_lock);
+ di->usb_state.usb_changed = true;
+ di->usb_state.state = bm_usb_state;
+ di->usb_state.usb_current = mA;
+ spin_unlock(&di->usb_state.usb_lock);
+
+ queue_work(di->charger_wq, &di->usb_state_changed_work);
+
+ return NOTIFY_OK;
+}
+
+#if defined(CONFIG_PM)
+static int ab5500_charger_resume(struct platform_device *pdev)
+{
+ struct ab5500_charger *di = platform_get_drvdata(pdev);
+
+ /* If we still have a HW failure, schedule a new check */
+ if (di->flags.usbchargernotok || di->flags.vbus_ovv) {
+ queue_delayed_work(di->charger_wq,
+ &di->check_hw_failure_work, 0);
+ }
+
+ return 0;
+}
+
+static int ab5500_charger_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab5500_charger *di = platform_get_drvdata(pdev);
+
+ /* Cancel any pending HW failure check */
+ if (delayed_work_pending(&di->check_hw_failure_work))
+ cancel_delayed_work(&di->check_hw_failure_work);
+
+ return 0;
+}
+#else
+#define ab5500_charger_suspend NULL
+#define ab5500_charger_resume NULL
+#endif
+
+static int __devexit ab5500_charger_remove(struct platform_device *pdev)
+{
+ struct ab5500_charger *di = platform_get_drvdata(pdev);
+ int i, irq;
+
+ /* Disable USB charging */
+ ab5500_charger_usb_en(&di->usb_chg, false, 0, 0);
+
+ /* Disable interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+
+ otg_unregister_notifier(di->otg, &di->nb);
+ otg_put_transceiver(di->otg);
+
+ /* Delete the work queue */
+ destroy_workqueue(di->charger_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->usb_chg.psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di);
+
+ return 0;
+}
+
+static int __devinit ab5500_charger_probe(struct platform_device *pdev)
+{
+ int irq, i, charger_status, ret = 0;
+ struct abx500_bm_plat_data *plat_data;
+
+ struct ab5500_charger *di =
+ kzalloc(sizeof(struct ab5500_charger), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab5500_gpadc_get("ab5500-adc.0");
+
+ /* initialize lock */
+ spin_lock_init(&di->usb_state.usb_lock);
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->charger;
+ di->bat = plat_data->battery;
+
+ /* get charger specific platform data */
+ if (!di->pdata) {
+ dev_err(di->dev, "no charger platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ /* USB supply */
+ /* power_supply base class */
+ di->usb_chg.psy.name = "ab5500_usb";
+ di->usb_chg.psy.type = POWER_SUPPLY_TYPE_USB;
+ di->usb_chg.psy.properties = ab5500_charger_usb_props;
+ di->usb_chg.psy.num_properties = ARRAY_SIZE(ab5500_charger_usb_props);
+ di->usb_chg.psy.get_property = ab5500_charger_usb_get_property;
+ di->usb_chg.psy.supplied_to = di->pdata->supplied_to;
+ di->usb_chg.psy.num_supplicants = di->pdata->num_supplicants;
+ /* ux500_charger sub-class */
+ di->usb_chg.ops.enable = &ab5500_charger_usb_en;
+ di->usb_chg.ops.kick_wd = &ab5500_charger_watchdog_kick;
+ di->usb_chg.ops.update_curr = &ab5500_charger_update_charger_current;
+ di->usb_chg.max_out_volt = ab5500_charger_voltage_map[
+ ARRAY_SIZE(ab5500_charger_voltage_map) - 1];
+ di->usb_chg.max_out_curr = ab5500_charger_current_map[
+ ARRAY_SIZE(ab5500_charger_current_map) - 1];
+
+ /* Create a work queue for the charger */
+ di->charger_wq =
+ create_singlethread_workqueue("ab5500_charger_wq");
+ if (di->charger_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for HW failure check */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+ ab5500_charger_check_hw_failure_work);
+ INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+ ab5500_charger_check_usbchargernotok_work);
+
+ /* Init work for charger detection */
+ INIT_WORK(&di->usb_link_status_work,
+ ab5500_charger_usb_link_status_work);
+ INIT_WORK(&di->detect_usb_type_work,
+ ab5500_charger_detect_usb_type_work);
+
+ INIT_WORK(&di->usb_state_changed_work,
+ ab5500_charger_usb_state_changed_work);
+
+ /* Init work for checking HW status */
+ INIT_WORK(&di->check_usb_thermal_prot_work,
+ ab5500_charger_check_usb_thermal_prot_work);
+
+ /* Get Chip ID of the ABB ASIC */
+ ret = abx500_get_chip_id(di->dev);
+ if (ret < 0) {
+ dev_err(di->dev, "failed to get chip ID\n");
+ goto free_charger_wq;
+ }
+ di->chip_id = ret;
+ dev_dbg(di->dev, "AB5500 CID is: 0x%02x\n", di->chip_id);
+
+ /* Initialize OVV and other registers */
+ ret = ab5500_charger_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize ABB registers\n");
+ goto free_charger_wq;
+ }
+
+ /* Register USB charger class */
+ ret = power_supply_register(di->dev, &di->usb_chg.psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register USB charger\n");
+ goto free_charger_wq;
+ }
+
+ di->otg = otg_get_transceiver();
+ if (!di->otg) {
+ dev_err(di->dev, "failed to get otg transceiver\n");
+ goto free_usb;
+ }
+ di->nb.notifier_call = ab5500_charger_usb_notifier_call;
+ ret = otg_register_notifier(di->otg, &di->nb);
+ if (ret) {
+ dev_err(di->dev, "failed to register otg notifier\n");
+ goto put_otg_transceiver;
+ }
+
+ /* Identify the connected charger types during startup */
+ charger_status = ab5500_charger_detect_chargers(di);
+ if (charger_status > 0 && (charger_status & USB_PW_CONN)) {
+ dev_dbg(di->dev, "VBUS Detect during startup\n");
+ di->vbus_detected = true;
+ di->vbus_detected_start = true;
+ queue_work(di->charger_wq,
+ &di->usb_link_status_work);
+ }
+
+ /* Register interrupts */
+ for (i = 0; i < ARRAY_SIZE(ab5500_charger_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name);
+ ret = request_threaded_irq(irq, NULL, ab5500_charger_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ ab5500_charger_irq[i].name, di);
+
+ if (ret != 0) {
+ dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
+ ab5500_charger_irq[i].name, irq, ret);
+ goto free_irq;
+ }
+ dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ ab5500_charger_irq[i].name, irq, ret);
+ }
+
+ platform_set_drvdata(pdev, di);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+free_irq:
+ otg_unregister_notifier(di->otg, &di->nb);
+
+ /* We also have to free all successfully registered irqs */
+ for (i = i - 1; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name);
+ free_irq(irq, di);
+ }
+put_otg_transceiver:
+ otg_put_transceiver(di->otg);
+free_usb:
+ power_supply_unregister(&di->usb_chg.psy);
+free_charger_wq:
+ destroy_workqueue(di->charger_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab5500_charger_driver = {
+ .probe = ab5500_charger_probe,
+ .remove = __devexit_p(ab5500_charger_remove),
+ .suspend = ab5500_charger_suspend,
+ .resume = ab5500_charger_resume,
+ .driver = {
+ .name = "ab5500-charger",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_charger_init(void)
+{
+ return platform_driver_register(&ab5500_charger_driver);
+}
+
+static void __exit ab5500_charger_exit(void)
+{
+ platform_driver_unregister(&ab5500_charger_driver);
+}
+
+subsys_initcall_sync(ab5500_charger_init);
+module_exit(ab5500_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab5500-charger");
+MODULE_DESCRIPTION("AB5500 charger management driver");
diff --git a/drivers/power/ab5500_fg.c b/drivers/power/ab5500_fg.c
new file mode 100644
index 00000000000..c74d351bd8b
--- /dev/null
+++ b/drivers/power/ab5500_fg.c
@@ -0,0 +1,1954 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2011
+ *
+ * Main and Back-up battery management driver.
+ *
+ * Note: Backup battery management is required for Li-Ion backup batteries,
+ * not for capacitive ones. HREF boards have a capacitive backup battery, so
+ * backup battery management is not used there, although the supporting code
+ * is available in this driver.
+ *
+ * License Terms: GNU General Public License v2
+ * Authors:
+ * Johan Palsson <johan.palsson@stericsson.com>
+ * Karl Komierowski <karl.komierowski@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/mfd/abx500/ab5500-bm.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+static LIST_HEAD(ab5500_fg_list);
+
+/* U5500 Constants */
+#define FG_ON_MASK 0x04
+#define FG_ON 0x04
+#define FG_ACC_RESET_ON_READ_MASK 0x08
+#define FG_ACC_RESET_ON_READ 0x08
+#define EN_READOUT_MASK 0x01
+#define EN_READOUT 0x01
+#define EN_ACC_RESET_ON_READ 0x08
+#define ACC_RESET_ON_READ 0x08
+#define RESET 0x00
+#define EOC_52_mA 0x04
+#define MILLI_TO_MICRO 1000
+#define FG_LSB_IN_MA 770
+#define QLSB_NANO_AMP_HOURS_X100 5353
+#define SEC_TO_SAMPLE(S) ((S) * 4)
+#define NBR_AVG_SAMPLES 20
+#define LOW_BAT_CHECK_INTERVAL (2 * HZ)
+#define FG_PERIODIC_START_INTERVAL ((250 * HZ) / 1000) /* 250 msec */
+
+#define VALID_CAPACITY_SEC (45 * 60) /* 45 minutes */
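+
+/*
+ * Worked derivation (added for clarity, assuming the 192.7 uC/LSB charge
+ * quantum used in the current conversion below): 192.7 uC equals
+ * 192.7e-6 / 3600 Ah ~= 53.53 nAh per LSB, which scaled by 100 gives
+ * QLSB_NANO_AMP_HOURS_X100 == 5353. At the 4 Hz sample rate the same LSB
+ * corresponds to 192.7 uC * 4 Hz ~= 0.77 mA, i.e. FG_LSB_IN_MA == 770
+ * expressed in uA.
+ */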
+
+#define interpolate(x, x1, y1, x2, y2) \
+ ((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))))
+
+#define to_ab5500_fg_device_info(x) container_of((x), \
+ struct ab5500_fg, fg_psy)
+
+/**
+ * struct ab5500_fg_interrupts - ab5500 fg interrupts
+ * @name: name of the interrupt
+ * @isr: function pointer to the isr
+ */
+struct ab5500_fg_interrupts {
+ char *name;
+ irqreturn_t (*isr)(int irq, void *data);
+};
+
+enum ab5500_fg_discharge_state {
+ AB5500_FG_DISCHARGE_INIT,
+ AB5500_FG_DISCHARGE_INITMEASURING,
+ AB5500_FG_DISCHARGE_INIT_RECOVERY,
+ AB5500_FG_DISCHARGE_RECOVERY,
+ AB5500_FG_DISCHARGE_READOUT,
+ AB5500_FG_DISCHARGE_WAKEUP,
+};
+
+static char *discharge_state[] = {
+ "DISCHARGE_INIT",
+ "DISCHARGE_INITMEASURING",
+ "DISCHARGE_INIT_RECOVERY",
+ "DISCHARGE_RECOVERY",
+ "DISCHARGE_READOUT",
+ "DISCHARGE_WAKEUP",
+};
+
+enum ab5500_fg_charge_state {
+ AB5500_FG_CHARGE_INIT,
+ AB5500_FG_CHARGE_READOUT,
+};
+
+static char *charge_state[] = {
+ "CHARGE_INIT",
+ "CHARGE_READOUT",
+};
+
+enum ab5500_fg_calibration_state {
+ AB5500_FG_CALIB_INIT,
+ AB5500_FG_CALIB_WAIT,
+ AB5500_FG_CALIB_END,
+};
+
+struct ab5500_fg_avg_cap {
+ int avg;
+ int samples[NBR_AVG_SAMPLES];
+ __kernel_time_t time_stamps[NBR_AVG_SAMPLES];
+ int pos;
+ int nbr_samples;
+ int sum;
+};
+
+struct ab5500_fg_battery_capacity {
+ int max_mah_design;
+ int max_mah;
+ int mah;
+ int permille;
+ int level;
+ int prev_mah;
+ int prev_percent;
+ int prev_level;
+};
+
+struct ab5500_fg_flags {
+ bool fg_enabled;
+ bool conv_done;
+ bool charging;
+ bool fully_charged;
+ bool low_bat_delay;
+ bool low_bat;
+ bool bat_ovv;
+ bool batt_unknown;
+ bool calibrate;
+};
+
+/**
+ * struct ab5500_fg - ab5500 FG device information
+ * @dev: Pointer to the struct device
+ * @vbat: Battery voltage in mV
+ * @vbat_nom: Nominal battery voltage in mV
+ * @inst_curr: Instantaneous battery current in mA
+ * @avg_curr: Average battery current in mA
+ * @fg_samples: Number of samples used in the FG accumulation
+ * @accu_charge: Accumulated charge from the last conversion
+ * @recovery_cnt: Counter for recovery mode
+ * @high_curr_cnt: Counter for high current mode
+ * @init_cnt: Counter for init mode
+ * @v_to_cap: capacity based on battery voltage
+ * @recovery_needed: Indicate if recovery is needed
+ * @high_curr_mode: Indicate if we're in high current mode
+ * @init_capacity: Indicate if initial capacity measuring should be done
+ * @calib_state: State during offset calibration
+ * @discharge_state: Current discharge state
+ * @charge_state: Current charge state
+ * @flags: Structure for information about events triggered
+ * @bat_cap: Structure for battery capacity specific parameters
+ * @avg_cap: Average capacity filter
+ * @parent: Pointer to the struct ab5500
+ * @gpadc: Pointer to the struct gpadc
+ * @gpadc_auto: Pointer to the struct adc_auto_input
+ * @pdata: Pointer to the ab5500_fg platform data
+ * @bat: Pointer to the ab5500_bm platform data
+ * @fg_psy: Structure that holds the FG specific battery properties
+ * @fg_wq: Work queue for running the FG algorithm
+ * @fg_periodic_work: Work to run the FG algorithm periodically
+ * @fg_low_bat_work: Work to check low bat condition
+ * @fg_reinit_work: Work to reset and re-initialize fuel gauge
+ * @fg_work: Work to run the FG algorithm instantly
+ * @fg_acc_cur_work: Work to read the FG accumulator
+ * @cc_lock: Mutex for locking the CC
+ * @node: struct of type list_head
+ */
+struct ab5500_fg {
+ struct device *dev;
+ int vbat;
+ int vbat_nom;
+ int inst_curr;
+ int avg_curr;
+ int fg_samples;
+ int accu_charge;
+ int recovery_cnt;
+ int high_curr_cnt;
+ int init_cnt;
+ int v_to_cap;
+ bool recovery_needed;
+ bool high_curr_mode;
+ bool init_capacity;
+ enum ab5500_fg_calibration_state calib_state;
+ enum ab5500_fg_discharge_state discharge_state;
+ enum ab5500_fg_charge_state charge_state;
+ struct ab5500_fg_flags flags;
+ struct ab5500_fg_battery_capacity bat_cap;
+ struct ab5500_fg_avg_cap avg_cap;
+ struct ab5500 *parent;
+ struct ab5500_gpadc *gpadc;
+ struct adc_auto_input *gpadc_auto;
+ struct abx500_fg_platform_data *pdata;
+ struct abx500_bm_data *bat;
+ struct power_supply fg_psy;
+ struct workqueue_struct *fg_wq;
+ struct delayed_work fg_periodic_work;
+ struct delayed_work fg_low_bat_work;
+ struct delayed_work fg_reinit_work;
+ struct work_struct fg_work;
+ struct delayed_work fg_acc_cur_work;
+ struct mutex cc_lock;
+ struct list_head node;
+ struct timer_list avg_current_timer;
+};
+
+/* Main battery properties */
+static enum power_supply_property ab5500_fg_props[] = {
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+};
+
+/* Function Prototype */
+static int ab5500_fg_bat_v_trig(int mux);
+
+static int prev_samples, prev_val;
+
+struct ab5500_fg *ab5500_fg_get(void)
+{
+ struct ab5500_fg *di;
+ di = list_first_entry(&ab5500_fg_list, struct ab5500_fg, node);
+
+ return di;
+}
+
+/**
+ * ab5500_fg_is_low_curr() - Low or high current mode
+ * @di: pointer to the ab5500_fg structure
+ * @curr: the current to base our decision on
+ *
+ * Low current mode if the current consumption is below a certain threshold
+ */
+static int ab5500_fg_is_low_curr(struct ab5500_fg *di, int curr)
+{
+ /*
+ * We want to know if we're in low current mode
+ */
+ if (curr > -di->bat->fg_params->high_curr_threshold)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ab5500_fg_add_cap_sample() - Add capacity to average filter
+ * @di: pointer to the ab5500_fg structure
+ * @sample: the capacity in mAh to add to the filter
+ *
+ * A capacity is added to the filter and a new mean capacity is calculated and
+ * returned
+ */
+static int ab5500_fg_add_cap_sample(struct ab5500_fg *di, int sample)
+{
+ struct timespec ts;
+ struct ab5500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ do {
+ avg->sum += sample - avg->samples[avg->pos];
+ avg->samples[avg->pos] = sample;
+ avg->time_stamps[avg->pos] = ts.tv_sec;
+ avg->pos++;
+
+ if (avg->pos == NBR_AVG_SAMPLES)
+ avg->pos = 0;
+
+ if (avg->nbr_samples < NBR_AVG_SAMPLES)
+ avg->nbr_samples++;
+
+ /*
+ * Check the time stamp for each sample. If too old,
+ * replace with latest sample
+ */
+ } while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
+
+ avg->avg = avg->sum / avg->nbr_samples;
+
+ return avg->avg;
+}
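+
+/*
+ * Worked example (added for illustration): with NBR_AVG_SAMPLES == 20 and
+ * the filter holding twenty samples of 1000 mAh (sum == 20000), adding one
+ * sample of 980 mAh gives sum == 20000 + (980 - 1000) == 19980 and a new
+ * mean of 19980 / 20 == 999 mAh.
+ */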
+
+/**
+ * ab5500_fg_clear_cap_samples() - Clear average filter
+ * @di: pointer to the ab5500_fg structure
+ *
+ * The capacity filter is reset to zero.
+ */
+static void ab5500_fg_clear_cap_samples(struct ab5500_fg *di)
+{
+ int i;
+ struct ab5500_fg_avg_cap *avg = &di->avg_cap;
+
+ avg->pos = 0;
+ avg->nbr_samples = 0;
+ avg->sum = 0;
+ avg->avg = 0;
+
+ for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+ avg->samples[i] = 0;
+ avg->time_stamps[i] = 0;
+ }
+}
+
+
+/**
+ * ab5500_fg_fill_cap_sample() - Fill average filter
+ * @di: pointer to the ab5500_fg structure
+ * @sample: the capacity in mAh to fill the filter with
+ *
+ * The capacity filter is filled with a capacity in mAh
+ */
+static void ab5500_fg_fill_cap_sample(struct ab5500_fg *di, int sample)
+{
+ int i;
+ struct timespec ts;
+ struct ab5500_fg_avg_cap *avg = &di->avg_cap;
+
+ getnstimeofday(&ts);
+
+ for (i = 0; i < NBR_AVG_SAMPLES; i++) {
+ avg->samples[i] = sample;
+ avg->time_stamps[i] = ts.tv_sec;
+ }
+
+ avg->pos = 0;
+ avg->nbr_samples = NBR_AVG_SAMPLES;
+ avg->sum = sample * NBR_AVG_SAMPLES;
+ avg->avg = sample;
+}
+
+/**
+ * ab5500_fg_coulomb_counter() - enable coulomb counter
+ * @di: pointer to the ab5500_fg structure
+ * @enable: enable/disable
+ *
+ * Enable/Disable coulomb counter.
+ * On failure returns negative value.
+ */
+static int ab5500_fg_coulomb_counter(struct ab5500_fg *di, bool enable)
+{
+ int ret = 0;
+ mutex_lock(&di->cc_lock);
+ if (enable) {
+ /* Power-up the CC */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ (FG_ON | FG_ACC_RESET_ON_READ));
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = true;
+ } else {
+ /* Stop the CC */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ FG_ON_MASK, RESET);
+ if (ret)
+ goto cc_err;
+
+ di->flags.fg_enabled = false;
+
+ }
+ dev_dbg(di->dev, " CC enabled: %d Samples: %d\n",
+ enable, di->fg_samples);
+
+ mutex_unlock(&di->cc_lock);
+
+ return ret;
+cc_err:
+ dev_err(di->dev, "%s Enabling coulomb counter failed\n", __func__);
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
+
+/**
+ * ab5500_fg_inst_curr() - battery instantaneous current
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns battery instantaneous current (on success) else error code
+ */
+static int ab5500_fg_inst_curr(struct ab5500_fg *di)
+{
+ u8 low, high;
+ static int val;
+ int ret = 0;
+ bool fg_off = false;
+
+ if (!di->flags.fg_enabled) {
+ fg_off = true;
+ /* Power-up the CC */
+ ab5500_fg_coulomb_counter(di, true);
+ msleep(250);
+ }
+
+ mutex_lock(&di->cc_lock);
+
+ /* Enable read request */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_B,
+ EN_READOUT_MASK, EN_READOUT);
+ if (ret)
+ goto inst_curr_err;
+
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FGDIR_READ0, &low);
+ if (ret < 0)
+ goto inst_curr_err;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FGDIR_READ1, &high);
+ if (ret < 0)
+ goto inst_curr_err;
+
+ /*
+ * A negative value means discharging;
+ * convert the 13-bit two's complement value into decimal
+ */
+ if (high & 0x10)
+ val = (low | (high << 8) | 0xFFFFE000);
+ else
+ val = (low | (high << 8));
+
+ /*
+ * Convert to unit value in mA
+ * R(FGSENSE) = 20 mOhm
+ * Scaling of LSB: for R(FGSENSE) this corresponds to a current of
+ * I = Q/t = 192.7 uC * 4 Hz = 0.77 mA
+ */
+ val = (val * 770) / 1000;
+
+ mutex_unlock(&di->cc_lock);
+
+ if (fg_off) {
+ dev_dbg(di->dev, "%s Disable FG\n", __func__);
+ /* Power-off the CC */
+ ab5500_fg_coulomb_counter(di, false);
+ }
+
+ return val;
+
+inst_curr_err:
+ dev_err(di->dev, "%s Get instant current failed\n", __func__);
+ mutex_unlock(&di->cc_lock);
+ return ret;
+}
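+
+/*
+ * Illustrative helper (not part of the original driver; the name is
+ * hypothetical) showing the raw CC sample to mA conversion performed
+ * above: the sample is a 13-bit two's complement value spread over two
+ * registers, and one LSB corresponds to 0.77 mA (see FG_LSB_IN_MA).
+ */
+static inline int ab5500_fg_raw_to_ma(u8 low, u8 high)
+{
+ int raw = low | (high << 8);
+
+ if (high & 0x10) /* sign bit of the 13-bit value */
+ raw |= 0xFFFFE000; /* sign-extend to 32 bits */
+
+ return (raw * 770) / 1000; /* 0.77 mA per LSB */
+}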
+
+static void ab5500_fg_acc_cur_timer_expired(unsigned long data)
+{
+ struct ab5500_fg *di = (struct ab5500_fg *) data;
+ dev_dbg(di->dev, "Avg current timer expired\n");
+
+ /* Trigger execution of the algorithm instantly */
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work, 0);
+}
+
+/**
+ * ab5500_fg_acc_cur_work() - average battery current
+ * @work: pointer to the work_struct structure
+ *
+ * Update the average battery current obtained from the
+ * coulomb counter.
+ */
+static void ab5500_fg_acc_cur_work(struct work_struct *work)
+{
+ int val, raw_val, sample;
+ int ret;
+ u8 low, med, high, cnt_low, cnt_high;
+
+ struct ab5500_fg *di = container_of(work,
+ struct ab5500_fg, fg_acc_cur_work.work);
+
+ if (!di->flags.fg_enabled) {
+ /* Power-up the CC */
+ ab5500_fg_coulomb_counter(di, true);
+ msleep(250);
+ }
+ mutex_lock(&di->cc_lock);
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_C,
+ EN_READOUT_MASK, EN_READOUT);
+ if (ret < 0)
+ goto exit;
+ /* If charging read charging registers for accumulated values */
+ if (di->flags.charging) {
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ ACC_RESET_ON_READ, EN_ACC_RESET_ON_READ);
+ if (ret < 0)
+ goto exit;
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_CH0, &low);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_CH1, &med);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_CH2, &high);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT0, &cnt_low);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT1, &cnt_high);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ ACC_RESET_ON_READ, RESET);
+ if (ret < 0)
+ goto exit;
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work,
+ di->bat->interval_charging * HZ);
+ } else { /* discharging */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ ACC_RESET_ON_READ, EN_ACC_RESET_ON_READ);
+ if (ret < 0)
+ goto exit;
+ /* Read CC Sample conversion value Low and high */
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_DIS_CH0, &low);
+ if (ret < 0)
+ goto exit;
+
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_DIS_CH1, &med);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_DIS_CH2, &high);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT0, &cnt_low);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_get_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC,
+ AB5500_FG_VAL_COUNT1, &cnt_high);
+ if (ret < 0)
+ goto exit;
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ ACC_RESET_ON_READ, RESET);
+ if (ret < 0)
+ goto exit;
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work,
+ di->bat->interval_not_charging * HZ);
+ }
+ di->fg_samples = (cnt_low | (cnt_high << 8));
+ /*
+ * TODO: Workaround for the hardware issue that the accumulator is
+ * not reset after setting the reset_on_read bit and reading the
+ * accumulator registers.
+ */
+ if (prev_samples > di->fg_samples) {
+ /* overflow has occurred */
+ sample = (0xFFFF - prev_samples) + di->fg_samples;
+ } else
+ sample = di->fg_samples - prev_samples;
+ prev_samples = di->fg_samples;
+ di->fg_samples = sample;
+ val = (low | (med << 8) | (high << 16));
+ /*
+ * TODO: Workaround for the hardware issue that the accumulator is
+ * not reset after setting the reset_on_read bit and reading the
+ * accumulator registers.
+ */
+ if (prev_val > val)
+ raw_val = (0xFFFFFF - prev_val) + val;
+ else
+ raw_val = val - prev_val;
+ prev_val = val;
+ val = raw_val;
+
+ if (di->fg_samples) {
+ di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X100)/100000;
+ di->avg_curr = (val * FG_LSB_IN_MA) / (di->fg_samples * 1000);
+ } else
+ dev_err(di->dev,
+ "samples is zero, using previously calculated average current\n");
+ di->flags.conv_done = true;
+ di->calib_state = AB5500_FG_CALIB_END;
+
+ mutex_unlock(&di->cc_lock);
+
+ queue_work(di->fg_wq, &di->fg_work);
+
+ return;
+exit:
+ dev_err(di->dev,
+ "Failed to read or write gas gauge registers\n");
+ mutex_unlock(&di->cc_lock);
+ queue_work(di->fg_wq, &di->fg_work);
+}
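+
+/*
+ * Sketch of the wrap-around handling used above (helper name hypothetical,
+ * not part of the original driver): the 16-bit sample counter and 24-bit
+ * accumulator keep counting past a read due to the reset-on-read hardware
+ * issue, so the delta since the previous read is computed modulo the
+ * counter width.
+ */
+static inline int ab5500_fg_counter_delta(int cur, int prev, int max_val)
+{
+ /* max_val is 0xFFFF for the counter, 0xFFFFFF for the accumulator */
+ if (prev > cur) /* the counter wrapped since the last read */
+ return (max_val - prev) + cur;
+ return cur - prev;
+}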
+
+/**
+ * ab5500_fg_bat_voltage() - get battery voltage
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns battery voltage (on success) else error code
+ */
+static int ab5500_fg_bat_voltage(struct ab5500_fg *di)
+{
+ int vbat;
+ static int prev;
+
+ vbat = ab5500_gpadc_convert(di->gpadc, MAIN_BAT_V);
+ if (vbat < 0) {
+ dev_err(di->dev,
+ "%s gpadc conversion failed, using previous value\n",
+ __func__);
+ return prev;
+ }
+
+ prev = vbat;
+ return vbat;
+}
+
+/**
+ * ab5500_fg_volt_to_capacity() - Voltage based capacity
+ * @di: pointer to the ab5500_fg structure
+ * @voltage: The voltage to convert to a capacity
+ *
+ * Returns battery capacity in per mille based on voltage
+ */
+static int ab5500_fg_volt_to_capacity(struct ab5500_fg *di, int voltage)
+{
+ int i, tbl_size;
+ struct abx500_v_to_cap *tbl;
+ int cap = 0;
+
+ tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl;
+ tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements;
+
+ for (i = 0; i < tbl_size - 1; ++i) {
+ if (di->vbat < tbl[i].voltage && di->vbat > tbl[i+1].voltage)
+ di->v_to_cap = tbl[i].capacity;
+ }
+
+ for (i = 0; i < tbl_size; ++i) {
+ if (voltage > tbl[i].voltage)
+ break;
+ }
+
+ if ((i > 0) && (i < tbl_size)) {
+ cap = interpolate(voltage,
+ tbl[i].voltage,
+ tbl[i].capacity * 10,
+ tbl[i-1].voltage,
+ tbl[i-1].capacity * 10);
+ } else if (i == 0) {
+ cap = 1000;
+ } else {
+ cap = 0;
+ }
+
+ dev_dbg(di->dev, "%s Vbat: %d, Cap: %d per mille\n",
+ __func__, voltage, cap);
+
+ return cap;
+}
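+
+/*
+ * Worked example (illustrative, with made-up table entries): for adjacent
+ * table points (3900 mV, 75 %) and (3800 mV, 50 %) and a measured 3850 mV,
+ * the capacities are first scaled by 10 and interpolate() yields
+ * 500 + ((750 - 500) * (3850 - 3800)) / (3900 - 3800) == 625 per mille.
+ */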
+
+/**
+ * ab5500_fg_uncomp_volt_to_capacity() - Uncompensated voltage based capacity
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is not compensated
+ * for the voltage drop due to the load
+ */
+static int ab5500_fg_uncomp_volt_to_capacity(struct ab5500_fg *di)
+{
+ di->vbat = ab5500_fg_bat_voltage(di);
+ return ab5500_fg_volt_to_capacity(di, di->vbat);
+}
+
+/**
+ * ab5500_fg_load_comp_volt_to_capacity() - Load compensated voltage based capacity
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Returns battery capacity based on battery voltage that is load compensated
+ * for the voltage drop
+ */
+static int ab5500_fg_load_comp_volt_to_capacity(struct ab5500_fg *di)
+{
+ int vbat_comp;
+
+ di->inst_curr = ab5500_fg_inst_curr(di);
+ di->vbat = ab5500_fg_bat_voltage(di);
+
+ /* Use Ohms law to get the load compensated voltage */
+ vbat_comp = di->vbat - (di->inst_curr *
+ di->bat->bat_type[di->bat->batt_id].battery_resistance) / 1000;
+
+ dev_dbg(di->dev, "%s Measured Vbat: %dmV,Compensated Vbat %dmV, "
+ "R: %dmOhm, Current: %dmA\n",
+ __func__,
+ di->vbat,
+ vbat_comp,
+ di->bat->bat_type[di->bat->batt_id].battery_resistance,
+ di->inst_curr);
+
+ return ab5500_fg_volt_to_capacity(di, vbat_comp);
+}
+
+/**
+ * ab5500_fg_convert_mah_to_permille() - Capacity in mAh to permille
+ * @di: pointer to the ab5500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in permille
+ */
+static int ab5500_fg_convert_mah_to_permille(struct ab5500_fg *di, int cap_mah)
+{
+ return (cap_mah * 1000) / di->bat_cap.max_mah_design;
+}
+
+/**
+ * ab5500_fg_convert_permille_to_mah() - Capacity in permille to mAh
+ * @di: pointer to the ab5500_fg structure
+ * @cap_pm: capacity in permille
+ *
+ * Converts capacity in permille to capacity in mAh
+ */
+static int ab5500_fg_convert_permille_to_mah(struct ab5500_fg *di, int cap_pm)
+{
+ return cap_pm * di->bat_cap.max_mah_design / 1000;
+}
+
+/**
+ * ab5500_fg_convert_mah_to_uwh() - Capacity in mAh to uWh
+ * @di: pointer to the ab5500_fg structure
+ * @cap_mah: capacity in mAh
+ *
+ * Converts capacity in mAh to capacity in uWh
+ */
+static int ab5500_fg_convert_mah_to_uwh(struct ab5500_fg *di, int cap_mah)
+{
+ u64 div_res;
+ u32 div_rem;
+
+ div_res = ((u64) cap_mah) * ((u64) di->vbat_nom);
+ div_rem = do_div(div_res, 1000);
+
+ /* Make sure to round upwards if necessary */
+ if (div_rem >= 1000 / 2)
+ div_res++;
+
+ return (int) div_res;
+}
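+
+/*
+ * Worked example (illustrative): with cap_mah == 1501 and vbat_nom == 3700,
+ * div_res == 1501 * 3700 == 5553700; do_div(div_res, 1000) leaves the
+ * quotient 5553 and remainder 700, and since 700 >= 500 the result is
+ * rounded up to 5554.
+ */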
+
+/**
+ * ab5500_fg_calc_cap_charging() - Calculate remaining capacity while charging
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and the FG
+ * accumulator register value. The filter is filled with this capacity
+ */
+static int ab5500_fg_calc_cap_charging(struct ab5500_fg *di)
+{
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+
+ /*
+ * We force capacity to 100% as long as the algorithm
+ * reports that it's full.
+ */
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design ||
+ di->flags.fully_charged)
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+
+ ab5500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ /* We need to update battery voltage and inst current when charging */
+ di->vbat = ab5500_fg_bat_voltage(di);
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab5500_fg_calc_cap_discharge_voltage() - Capacity in discharge with voltage
+ * @di: pointer to the ab5500_fg structure
+ * @comp: if voltage should be load compensated before capacity calc
+ *
+ * Return the capacity in mAh based on the battery voltage. The voltage can
+ * either be load compensated or not. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab5500_fg_calc_cap_discharge_voltage(struct ab5500_fg *di, bool comp)
+{
+ int permille, mah;
+
+ if (comp)
+ permille = ab5500_fg_load_comp_volt_to_capacity(di);
+ else
+ permille = ab5500_fg_uncomp_volt_to_capacity(di);
+
+ mah = ab5500_fg_convert_permille_to_mah(di, permille);
+
+ di->bat_cap.mah = ab5500_fg_add_cap_sample(di, mah);
+ di->bat_cap.permille =
+ ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab5500_fg_calc_cap_discharge_fg() - Capacity in discharge with FG
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Return the capacity in mAh based on the previously calculated capacity and the FG
+ * accumulator register value. This value is added to the filter and a
+ * new mean value is calculated and returned.
+ */
+static int ab5500_fg_calc_cap_discharge_fg(struct ab5500_fg *di)
+{
+ int permille_volt, permille;
+
+ dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
+ __func__,
+ di->bat_cap.mah,
+ di->accu_charge);
+
+ /* Capacity should not be less than 0 */
+ if (di->bat_cap.mah + di->accu_charge > 0)
+ di->bat_cap.mah += di->accu_charge;
+ else
+ di->bat_cap.mah = 0;
+
+ if (di->bat_cap.mah >= di->bat_cap.max_mah_design)
+ di->bat_cap.mah = di->bat_cap.max_mah_design;
+
+ /*
+ * Check against voltage based capacity. It can not be lower
+ * than what the uncompensated voltage says
+ */
+ permille = ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ permille_volt = ab5500_fg_uncomp_volt_to_capacity(di);
+
+ if (permille < permille_volt) {
+ di->bat_cap.permille = permille_volt;
+ di->bat_cap.mah = ab5500_fg_convert_permille_to_mah(di,
+ di->bat_cap.permille);
+
+ dev_dbg(di->dev, "%s voltage based: perm %d perm_volt %d\n",
+ __func__,
+ permille,
+ permille_volt);
+
+ ab5500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ } else {
+ ab5500_fg_fill_cap_sample(di, di->bat_cap.mah);
+ di->bat_cap.permille =
+ ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
+ }
+
+ return di->bat_cap.mah;
+}
+
+/**
+ * ab5500_fg_capacity_level() - Get the battery capacity level
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Get the battery capacity level based on the capacity in percent
+ */
+static int ab5500_fg_capacity_level(struct ab5500_fg *di)
+{
+ int ret, percent;
+
+ percent = di->bat_cap.permille / 10;
+
+ if (percent <= di->bat->cap_levels->critical ||
+ di->flags.low_bat)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else if (percent <= di->bat->cap_levels->low)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (percent <= di->bat->cap_levels->normal)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ else if (percent <= di->bat->cap_levels->high)
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
+ else
+ ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+
+ return ret;
+}
+
+/**
+ * ab5500_fg_check_capacity_limits() - Check if capacity has changed
+ * @di: pointer to the ab5500_fg structure
+ * @init: capacity is allowed to go up in init mode
+ *
+ * Check if capacity or capacity limit has changed and notify the system
+ * about it using the power_supply framework
+ */
+static void ab5500_fg_check_capacity_limits(struct ab5500_fg *di, bool init)
+{
+ bool changed = false;
+
+ di->bat_cap.level = ab5500_fg_capacity_level(di);
+
+ if (di->bat_cap.level != di->bat_cap.prev_level) {
+ /*
+ * We do not allow reported capacity level to go up
+ * unless we're charging or if we're in init
+ */
+ if (!(!di->flags.charging && di->bat_cap.level >
+ di->bat_cap.prev_level) || init) {
+ dev_dbg(di->dev, "level changed from %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ di->bat_cap.prev_level = di->bat_cap.level;
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "level not allowed to go up "
+ "since no charger is connected: %d to %d\n",
+ di->bat_cap.prev_level,
+ di->bat_cap.level);
+ }
+ }
+
+ /*
+ * If we have received the LOW_BAT IRQ, set capacity to 0 to initiate
+ * shutdown
+ */
+ if (di->flags.low_bat) {
+ dev_dbg(di->dev, "Battery low, set capacity to 0\n");
+ di->bat_cap.prev_percent = 0;
+ di->bat_cap.permille = 0;
+ di->bat_cap.prev_mah = 0;
+ di->bat_cap.mah = 0;
+ changed = true;
+ } else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) {
+ if (di->bat_cap.permille / 10 == 0) {
+ /*
+ * We will not report 0% unless we've got
+ * the LOW_BAT IRQ, no matter what the FG
+ * algorithm says.
+ */
+ di->bat_cap.prev_percent = 1;
+ di->bat_cap.permille = 1;
+ di->bat_cap.prev_mah = 1;
+ di->bat_cap.mah = 1;
+
+ changed = true;
+ } else if (!(!di->flags.charging &&
+ (di->bat_cap.permille / 10) >
+ di->bat_cap.prev_percent) || init) {
+ /*
+ * We do not allow reported capacity to go up
+ * unless we're charging or if we're in init
+ */
+ dev_dbg(di->dev,
+ "capacity changed from %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+ di->bat_cap.prev_mah = di->bat_cap.mah;
+
+ changed = true;
+ } else {
+ dev_dbg(di->dev, "capacity not allowed to go up since "
+ "no charger is connected: %d to %d (%d)\n",
+ di->bat_cap.prev_percent,
+ di->bat_cap.permille / 10,
+ di->bat_cap.permille);
+ }
+ }
+
+ if (changed)
+ power_supply_changed(&di->fg_psy);
+
+}
+
+static void ab5500_fg_charge_state_to(struct ab5500_fg *di,
+ enum ab5500_fg_charge_state new_state)
+{
+ dev_dbg(di->dev, "Charge state from %d [%s] to %d [%s]\n",
+ di->charge_state,
+ charge_state[di->charge_state],
+ new_state,
+ charge_state[new_state]);
+
+ di->charge_state = new_state;
+}
+
+static void ab5500_fg_discharge_state_to(struct ab5500_fg *di,
+ enum ab5500_fg_discharge_state new_state)
+{
+ dev_dbg(di->dev, "Discharge state from %d [%s] to %d [%s]\n",
+ di->discharge_state,
+ discharge_state[di->discharge_state],
+ new_state,
+ discharge_state[new_state]);
+
+ di->discharge_state = new_state;
+}
+
+/**
+ * ab5500_fg_algorithm_charging() - FG algorithm for when charging
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're charging
+ */
+static void ab5500_fg_algorithm_charging(struct ab5500_fg *di)
+{
+ /*
+ * If we change to discharge mode
+ * we should start with recovery
+ */
+ if (di->discharge_state != AB5500_FG_DISCHARGE_INIT_RECOVERY)
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_INIT_RECOVERY);
+
+ switch (di->charge_state) {
+ case AB5500_FG_CHARGE_INIT:
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_charging);
+
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_READOUT);
+
+ break;
+
+ case AB5500_FG_CHARGE_READOUT:
+ /*
+ * Read the FG and calculate the new capacity
+ */
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ ab5500_fg_calc_cap_charging(di);
+
+ break;
+
+ default:
+ break;
+ }
+
+ /* Check capacity limits */
+ ab5500_fg_check_capacity_limits(di, false);
+}
+
+/**
+ * ab5500_fg_algorithm_discharging() - FG algorithm for when discharging
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Battery capacity calculation state machine for when we're discharging
+ */
+static void ab5500_fg_algorithm_discharging(struct ab5500_fg *di)
+{
+ int sleep_time;
+
+ /* If we change to charge mode we should start with init */
+ if (di->charge_state != AB5500_FG_CHARGE_INIT)
+ ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_INIT);
+
+ switch (di->discharge_state) {
+ case AB5500_FG_DISCHARGE_INIT:
+ /* We use the FG IRQ to work on */
+ di->init_cnt = 0;
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_INITMEASURING);
+
+ /* Intentional fallthrough */
+ case AB5500_FG_DISCHARGE_INITMEASURING:
+ /*
+ * Discard a number of samples during startup.
+ * After that, use compensated voltage for a few
+ * samples to get an initial capacity.
+ * Then go to READOUT
+ */
+ sleep_time = di->bat->fg_params->init_timer;
+
+ /* Discard the first [x] seconds */
+ if (di->init_cnt >
+ di->bat->fg_params->init_discard_time) {
+
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+
+ ab5500_fg_check_capacity_limits(di, true);
+ }
+
+ di->init_cnt += sleep_time;
+ if (di->init_cnt >
+ di->bat->fg_params->init_total_time) {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_READOUT);
+ }
+
+ break;
+
+ case AB5500_FG_DISCHARGE_INIT_RECOVERY:
+ di->recovery_cnt = 0;
+ di->recovery_needed = true;
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_RECOVERY);
+
+ /* Intentional fallthrough */
+
+ case AB5500_FG_DISCHARGE_RECOVERY:
+ sleep_time = di->bat->fg_params->recovery_sleep_timer;
+
+ /*
+ * We should check the power consumption
+ * If low, go to READOUT (after x min) or
+ * RECOVERY_SLEEP if time left.
+ * If high, go to READOUT
+ */
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ if (ab5500_fg_is_low_curr(di, di->inst_curr)) {
+ if (di->recovery_cnt >
+ di->bat->fg_params->recovery_total_time) {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_READOUT);
+ di->recovery_needed = false;
+ } else {
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work,
+ sleep_time * HZ);
+ }
+ di->recovery_cnt += sleep_time;
+ } else {
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_READOUT);
+ }
+
+ break;
+
+ case AB5500_FG_DISCHARGE_READOUT:
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ if (ab5500_fg_is_low_curr(di, di->inst_curr)) {
+ /* Detect mode change */
+ if (di->high_curr_mode) {
+ di->high_curr_mode = false;
+ di->high_curr_cnt = 0;
+ }
+
+ if (di->recovery_needed) {
+ ab5500_fg_discharge_state_to(di,
+ AB5500_FG_DISCHARGE_RECOVERY);
+
+ queue_delayed_work(di->fg_wq,
+ &di->fg_periodic_work,
+ 0);
+
+ break;
+ }
+
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+ } else {
+ mutex_lock(&di->cc_lock);
+ if (!di->flags.conv_done) {
+ /* Wasn't the CC IRQ that got us here */
+ mutex_unlock(&di->cc_lock);
+ dev_dbg(di->dev, "%s CC conv not done\n",
+ __func__);
+
+ break;
+ }
+ di->flags.conv_done = false;
+ mutex_unlock(&di->cc_lock);
+
+ /* Detect mode change */
+ if (!di->high_curr_mode) {
+ di->high_curr_mode = true;
+ di->high_curr_cnt = 0;
+ }
+
+ di->high_curr_cnt +=
+ di->bat->fg_params->accu_high_curr;
+ if (di->high_curr_cnt >
+ di->bat->fg_params->high_curr_time)
+ di->recovery_needed = true;
+
+ ab5500_fg_calc_cap_discharge_fg(di);
+ }
+
+ ab5500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ case AB5500_FG_DISCHARGE_WAKEUP:
+ ab5500_fg_coulomb_counter(di, true);
+ di->inst_curr = ab5500_fg_inst_curr(di);
+
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+
+ di->fg_samples = SEC_TO_SAMPLE(
+ di->bat->fg_params->accu_high_curr);
+ /* Re-program number of samples set above */
+ ab5500_fg_coulomb_counter(di, true);
+ ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_READOUT);
+
+ ab5500_fg_check_capacity_limits(di, false);
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ab5500_fg_algorithm_calibrate() - Internal coulomb counter offset calibration
+ * @di: pointer to the ab5500_fg structure
+ *
+ */
+static void ab5500_fg_algorithm_calibrate(struct ab5500_fg *di)
+{
+ int ret;
+
+ switch (di->calib_state) {
+ case AB5500_FG_CALIB_INIT:
+ dev_dbg(di->dev, "Calibration ongoing...\n");
+ /* TODO: For Cut 1.1 no calibration */
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A,
+ FG_ACC_RESET_ON_READ_MASK, FG_ACC_RESET_ON_READ);
+ if (ret)
+ goto err;
+ di->calib_state = AB5500_FG_CALIB_WAIT;
+ break;
+ case AB5500_FG_CALIB_END:
+ di->flags.calibrate = false;
+ dev_dbg(di->dev, "Calibration done...\n");
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ break;
+ case AB5500_FG_CALIB_WAIT:
+ dev_dbg(di->dev, "Calibration WFI\n");
+ default:
+ break;
+ }
+ return;
+err:
+ /* Something went wrong, don't calibrate then */
+ dev_err(di->dev, "failed to calibrate the CC\n");
+ di->flags.calibrate = false;
+ di->calib_state = AB5500_FG_CALIB_INIT;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+}
+
+/**
+ * ab5500_fg_algorithm() - Entry point for the FG algorithm
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Entry point for the battery capacity calculation state machine
+ */
+static void ab5500_fg_algorithm(struct ab5500_fg *di)
+{
+ if (di->flags.calibrate) {
+ ab5500_fg_algorithm_calibrate(di);
+ } else {
+ if (di->flags.charging)
+ ab5500_fg_algorithm_charging(di);
+ else
+ ab5500_fg_algorithm_discharging(di);
+ }
+
+ dev_dbg(di->dev, "[FG_DATA] %d %d %d %d %d %d %d %d %d "
+ "%d %d %d %d %d %d %d\n",
+ di->bat_cap.max_mah_design,
+ di->bat_cap.mah,
+ di->bat_cap.permille,
+ di->bat_cap.level,
+ di->bat_cap.prev_mah,
+ di->bat_cap.prev_percent,
+ di->bat_cap.prev_level,
+ di->vbat,
+ di->inst_curr,
+ di->avg_curr,
+ di->accu_charge,
+ di->flags.charging,
+ di->charge_state,
+ di->discharge_state,
+ di->high_curr_mode,
+ di->recovery_needed);
+}
+
+/**
+ * ab5500_fg_periodic_work() - Run the FG state machine periodically
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for periodic work
+ */
+static void ab5500_fg_periodic_work(struct work_struct *work)
+{
+ struct ab5500_fg *di = container_of(work, struct ab5500_fg,
+ fg_periodic_work.work);
+
+ if (di->init_capacity) {
+ /* A dummy read that will return 0 */
+ di->inst_curr = ab5500_fg_inst_curr(di);
+ /* Get an initial capacity calculation */
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+ ab5500_fg_check_capacity_limits(di, true);
+ di->init_capacity = false;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ } else {
+ ab5500_fg_algorithm(di);
+ }
+}
+
+/**
+ * ab5500_fg_low_bat_work() - Check LOW_BAT condition
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for checking the LOW_BAT condition
+ */
+static void ab5500_fg_low_bat_work(struct work_struct *work)
+{
+ int vbat;
+
+ struct ab5500_fg *di = container_of(work, struct ab5500_fg,
+ fg_low_bat_work.work);
+
+ vbat = ab5500_fg_bat_voltage(di);
+
+ /* Check if LOW_BAT still fulfilled */
+ if (vbat < di->bat->fg_params->lowbat_threshold) {
+ di->flags.low_bat = true;
+ dev_warn(di->dev, "Battery voltage still LOW\n");
+
+ /*
+ * We need to re-schedule this check to be able to detect
+ * if the voltage increases again during charging
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ power_supply_changed(&di->fg_psy);
+ } else {
+ di->flags.low_bat = false;
+ dev_warn(di->dev, "Battery voltage OK again\n");
+ power_supply_changed(&di->fg_psy);
+ }
+
+ /* This is needed to dispatch LOW_BAT */
+ ab5500_fg_check_capacity_limits(di, false);
+
+ /* Set this flag to check if LOW_BAT IRQ still occurs */
+ di->flags.low_bat_delay = false;
+}
+
+/**
+ * ab5500_fg_instant_work() - Run the FG state machine instantly
+ * @work: pointer to the work_struct structure
+ *
+ * Work queue function for instant work
+ */
+static void ab5500_fg_instant_work(struct work_struct *work)
+{
+ struct ab5500_fg *di = container_of(work, struct ab5500_fg, fg_work);
+
+ ab5500_fg_algorithm(di);
+}
+
+/**
+ * ab5500_fg_get_property() - get the fg properties
+ * @psy: pointer to the power_supply structure
+ * @psp: pointer to the power_supply_property structure
+ * @val: pointer to the power_supply_propval union
+ *
+ * This function gets called when an application tries to get the
+ * fg properties by reading the sysfs files.
+ * voltage_now: battery voltage
+ * current_now: battery instant current
+ * current_avg: battery average current
+ * charge_full_design: capacity where battery is considered full
+ * charge_now: battery capacity in uAh
+ * capacity: capacity in percent
+ * capacity_level: capacity level
+ *
+ * Returns error code in case of failure else 0 on success
+ */
+static int ab5500_fg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ab5500_fg *di;
+
+ di = to_ab5500_fg_device_info(psy);
+
+ /*
+ * If battery is identified as unknown and charging of unknown
+ * batteries is disabled, we always report 100% capacity and
+ * capacity level UNKNOWN, since we can't calculate
+ * remaining capacity
+ */
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (di->flags.bat_ovv)
+ /* Report the OVV threshold: 4.75 V, in uV */
+ val->intval = 4750000;
+ else {
+ di->vbat = ab5500_gpadc_convert
+ (di->gpadc, MAIN_BAT_V);
+ val->intval = di->vbat * 1000;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ di->inst_curr = ab5500_fg_inst_curr(di);
+ val->intval = di->inst_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = di->avg_curr * 1000;
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah_design);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ break;
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.max_mah);
+ else
+ val->intval = ab5500_fg_convert_mah_to_uwh(di,
+ di->bat_cap.prev_mah);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = di->bat_cap.max_mah_design;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = di->bat_cap.max_mah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = di->bat_cap.max_mah;
+ else
+ val->intval = di->bat_cap.prev_mah;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = 100;
+ else
+ val->intval = di->bat_cap.prev_percent;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ if (di->flags.batt_unknown && !di->bat->chg_unknown_bat)
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ else
+ val->intval = di->bat_cap.prev_level;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
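These properties surface as sysfs attributes under the registered psy name. A minimal userspace sketch (hypothetical tooling, assuming the "ab5500_fg" name set in the probe function below):

#include <stdio.h>

/* Hypothetical reader; the sysfs path assumes the "ab5500_fg" psy name. */
int main(void)
{
	FILE *f = fopen("/sys/class/power_supply/ab5500_fg/capacity", "r");
	int capacity;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &capacity) == 1)
		printf("capacity: %d%%\n", capacity); /* percent, as reported above */
	fclose(f);
	return 0;
}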
+
+static int ab5500_fg_get_ext_psy_data(struct device *dev, void *data)
+{
+ struct power_supply *psy;
+ struct power_supply *ext;
+ struct ab5500_fg *di;
+ union power_supply_propval ret;
+ int i, j;
+ bool psy_found = false;
+
+ psy = (struct power_supply *)data;
+ ext = dev_get_drvdata(dev);
+ di = to_ab5500_fg_device_info(psy);
+
+ /*
+ * For each external psy that lists this driver's name
+ * in its supplied_to array
+ */
+ for (i = 0; i < ext->num_supplicants; i++) {
+ if (!strcmp(ext->supplied_to[i], psy->name))
+ psy_found = true;
+ }
+
+ if (!psy_found)
+ return 0;
+
+ /* Go through all properties for the psy */
+ for (j = 0; j < ext->num_properties; j++) {
+ enum power_supply_property prop;
+ prop = ext->properties[j];
+
+ if (ext->get_property(ext, prop, &ret))
+ continue;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ switch (ret.intval) {
+ case POWER_SUPPLY_STATUS_UNKNOWN:
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ if (!di->flags.charging)
+ break;
+ di->flags.charging = false;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_FULL:
+ if (di->flags.fully_charged)
+ break;
+ di->flags.fully_charged = true;
+ /* Save current capacity as maximum */
+ di->bat_cap.max_mah = di->bat_cap.mah;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ if (di->flags.charging)
+ break;
+ di->flags.charging = true;
+ di->flags.fully_charged = false;
+ queue_work(di->fg_wq, &di->fg_work);
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (ext->type) {
+ case POWER_SUPPLY_TYPE_BATTERY:
+ if (ret.intval)
+ di->flags.batt_unknown = false;
+ else
+ di->flags.batt_unknown = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
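The matching on supplied_to above means an external charger psy only reaches this fuel gauge if its board wiring names "ab5500_fg". An illustrative sketch (hypothetical board code, not part of this patch):

/* Hypothetical charger psy declaration that would feed this driver. */
static char *example_charger_supplied_to[] = {
	"ab5500_fg",	/* must match di->fg_psy.name */
};

static struct power_supply example_charger_psy = {
	.name		 = "example-charger",
	.type		 = POWER_SUPPLY_TYPE_MAINS,
	.supplied_to	 = example_charger_supplied_to,
	.num_supplicants = ARRAY_SIZE(example_charger_supplied_to),
};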
+
+/**
+ * ab5500_fg_init_hw_registers() - Set up FG related registers
+ * @di: pointer to the ab5500_fg structure
+ *
+ * Set up battery OVV, low battery voltage registers
+ */
+static int ab5500_fg_init_hw_registers(struct ab5500_fg *di)
+{
+ int ret;
+ struct adc_auto_input *auto_ip;
+
+ auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL);
+ if (!auto_ip) {
+ dev_err(di->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ auto_ip->mux = MAIN_BAT_V;
+ auto_ip->freq = MS500;
+ auto_ip->min = di->bat->fg_params->lowbat_threshold;
+ auto_ip->max = di->bat->fg_params->overbat_threshold;
+ auto_ip->auto_adc_callback = ab5500_fg_bat_v_trig;
+ di->gpadc_auto = auto_ip;
+ ret = ab5500_gpadc_convert_auto(di->gpadc, di->gpadc_auto);
+ if (ret)
+ dev_err(di->dev,
+ "failed to set auto trigger for battery votlage\n");
+ /* program the End Of Charge current threshold (EOC_52_mA) */
+ ret = abx500_set_register_interruptible(di->dev,
+ AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_EOC, EOC_52_mA);
+ return ret;
+}
+
+static int ab5500_fg_bat_v_trig(int mux)
+{
+ struct ab5500_fg *di = ab5500_fg_get();
+
+ di->vbat = ab5500_gpadc_convert(di->gpadc, MAIN_BAT_V);
+
+ /* check if the battery voltage is below low threshold */
+ if (di->vbat < di->bat->fg_params->lowbat_threshold) {
+ dev_warn(di->dev, "Battery voltage is below LOW threshold\n");
+ di->flags.low_bat_delay = true;
+ /*
+ * Start a timer to check LOW_BAT again after some time
+ * This is done to avoid shutdown on single voltage dips
+ */
+ queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+ round_jiffies(LOW_BAT_CHECK_INTERVAL));
+ power_supply_changed(&di->fg_psy);
+ }
+ /* check if battery voltage is above OVV */
+ else if (di->vbat > di->bat->fg_params->overbat_threshold) {
+ dev_warn(di->dev, "Battery OVV\n");
+ di->flags.bat_ovv = true;
+
+ power_supply_changed(&di->fg_psy);
+ } else {
+ dev_err(di->dev,
+ "Invalid gpadc auto trigger for battery voltage\n");
+ }
+
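+ /*
+ * The fired trigger request is consumed here: free it and re-arm
+ * the auto ADC monitor (and the EOC setting) by running
+ * ab5500_fg_init_hw_registers() again.
+ */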
+ kfree(di->gpadc_auto);
+ ab5500_fg_init_hw_registers(di);
+ return 0;
+}
+
+/**
+ * ab5500_fg_external_power_changed() - callback for power supply changes
+ * @psy: pointer to the structure power_supply
+ *
+ * This function is the entry point of the pointer external_power_changed
+ * of the structure power_supply.
+ * This function gets executed when there is a change in any external power
+ * supply that this driver needs to be notified of.
+ */
+static void ab5500_fg_external_power_changed(struct power_supply *psy)
+{
+ struct ab5500_fg *di = to_ab5500_fg_device_info(psy);
+
+ class_for_each_device(power_supply_class, NULL,
+ &di->fg_psy, ab5500_fg_get_ext_psy_data);
+}
+
+/**
+ * ab5500_fg_reinit_work() - work to reset the FG algorithm
+ * @work: pointer to the work_struct structure
+ *
+ * Used to reset the current battery capacity to be able to
+ * retrigger a new voltage base capacity calculation. For
+ * test and verification purpose.
+ */
+static void ab5500_fg_reinit_work(struct work_struct *work)
+{
+ struct ab5500_fg *di = container_of(work, struct ab5500_fg,
+ fg_reinit_work.work);
+
+ if (!di->flags.calibrate) {
+ dev_dbg(di->dev, "Resetting FG state machine to init.\n");
+ ab5500_fg_clear_cap_samples(di);
+ ab5500_fg_calc_cap_discharge_voltage(di, true);
+ ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_INIT);
+ ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_INIT);
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+
+ } else {
+ dev_err(di->dev,
+ "Residual offset calibration ongoing retrying..\n");
+ /* Wait one second until next try*/
+ queue_delayed_work(di->fg_wq, &di->fg_reinit_work,
+ round_jiffies(1));
+ }
+}
+
+/**
+ * ab5500_fg_reinit() - forces FG algorithm to reinitialize with current values
+ *
+ * This function can be used to force the FG algorithm to recalculate a new
+ * voltage based battery capacity.
+ */
+void ab5500_fg_reinit(void)
+{
+ struct ab5500_fg *di = ab5500_fg_get();
+ /* The user is not notified if a NULL pointer is returned. */
+ if (di != NULL)
+ queue_delayed_work(di->fg_wq, &di->fg_reinit_work, 0);
+}
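A hedged usage sketch: since ab5500_fg_reinit() only queues work, it can be called from most contexts; a hypothetical debugfs hook (not part of this patch) could look like:

/* Hypothetical test hook forcing a fresh voltage-based capacity estimate. */
static ssize_t example_fg_reinit_write(struct file *file,
				       const char __user *buf,
				       size_t count, loff_t *ppos)
{
	ab5500_fg_reinit(); /* silently does nothing if no FG instance exists */
	return count;
}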
+
+#if defined(CONFIG_PM)
+static int ab5500_fg_resume(struct platform_device *pdev)
+{
+ struct ab5500_fg *di = platform_get_drvdata(pdev);
+
+ /*
+ * Change state if we're not charging. If we're charging we will wake
+ * up on the FG IRQ
+ */
+ if (!di->flags.charging) {
+ ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_WAKEUP);
+ queue_work(di->fg_wq, &di->fg_work);
+ }
+
+ return 0;
+}
+
+static int ab5500_fg_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ab5500_fg *di = platform_get_drvdata(pdev);
+
+ flush_delayed_work(&di->fg_periodic_work);
+
+ /*
+ * If the FG is enabled, disable it before going to suspend,
+ * but only if we're not charging
+ */
+ if (di->flags.fg_enabled && !di->flags.charging)
+ ab5500_fg_coulomb_counter(di, false);
+
+ return 0;
+}
+#else
+#define ab5500_fg_suspend NULL
+#define ab5500_fg_resume NULL
+#endif
+
+static int __devexit ab5500_fg_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ab5500_fg *di = platform_get_drvdata(pdev);
+
+ /* Disable coulomb counter */
+ ret = ab5500_fg_coulomb_counter(di, false);
+ if (ret)
+ dev_err(di->dev, "failed to disable coulomb counter\n");
+
+ destroy_workqueue(di->fg_wq);
+
+ flush_scheduled_work();
+ power_supply_unregister(&di->fg_psy);
+ platform_set_drvdata(pdev, NULL);
+ kfree(di->gpadc_auto);
+ kfree(di);
+ return ret;
+}
+
+static int __devinit ab5500_fg_probe(struct platform_device *pdev)
+{
+ struct abx500_bm_plat_data *plat_data;
+ int ret = 0;
+
+ struct ab5500_fg *di =
+ kzalloc(sizeof(struct ab5500_fg), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ mutex_init(&di->cc_lock);
+
+ /* get parent data */
+ di->dev = &pdev->dev;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ di->gpadc = ab5500_gpadc_get("ab5500-adc.0");
+
+ plat_data = pdev->dev.platform_data;
+ di->pdata = plat_data->fg;
+ di->bat = plat_data->battery;
+
+ /* get fg specific platform data */
+ if (!di->pdata) {
+ dev_err(di->dev, "no fg platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+
+ /* get battery specific platform data */
+ if (!di->bat) {
+ dev_err(di->dev, "no battery platform data supplied\n");
+ ret = -EINVAL;
+ goto free_device_info;
+ }
+ /* powerup fg to start sampling */
+ ab5500_fg_coulomb_counter(di, true);
+
+ di->fg_psy.name = "ab5500_fg";
+ di->fg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->fg_psy.properties = ab5500_fg_props;
+ di->fg_psy.num_properties = ARRAY_SIZE(ab5500_fg_props);
+ di->fg_psy.get_property = ab5500_fg_get_property;
+ di->fg_psy.supplied_to = di->pdata->supplied_to;
+ di->fg_psy.num_supplicants = di->pdata->num_supplicants;
+ di->fg_psy.external_power_changed = ab5500_fg_external_power_changed;
+
+ di->bat_cap.max_mah_design = MILLI_TO_MICRO *
+ di->bat->bat_type[di->bat->batt_id].charge_full_design;
+
+ di->bat_cap.max_mah = di->bat_cap.max_mah_design;
+
+ di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage;
+
+ di->init_capacity = true;
+
+ ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_INIT);
+ ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_INIT);
+
+ /* Create a work queue for running the FG algorithm */
+ di->fg_wq = create_singlethread_workqueue("ab5500_fg_wq");
+ if (di->fg_wq == NULL) {
+ dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
+ goto free_device_info;
+ }
+
+ /* Init work for running the fg algorithm instantly */
+ INIT_WORK(&di->fg_work, ab5500_fg_instant_work);
+
+ /* Init work for getting the battery accumulated current */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_acc_cur_work,
+ ab5500_fg_acc_cur_work);
+
+ /* Init work for reinitialising the fg algorithm */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
+ ab5500_fg_reinit_work);
+
+ /* Work delayed Queue to run the state machine */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+ ab5500_fg_periodic_work);
+
+ /* Work to check low battery condition */
+ INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+ ab5500_fg_low_bat_work);
+
+ list_add_tail(&di->node, &ab5500_fg_list);
+
+ /* Consider battery unknown until we're informed otherwise */
+ di->flags.batt_unknown = true;
+
+ /* Register FG power supply class */
+ ret = power_supply_register(di->dev, &di->fg_psy);
+ if (ret) {
+ dev_err(di->dev, "failed to register FG psy\n");
+ goto free_fg_wq;
+ }
+
+ /* Initialize OVV, and other registers */
+ ret = ab5500_fg_init_hw_registers(di);
+ if (ret) {
+ dev_err(di->dev, "failed to initialize registers\n");
+ goto pow_unreg;
+ }
+
+ di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+
+ /* Initialize avg current timer */
+ init_timer(&di->avg_current_timer);
+ di->avg_current_timer.function = ab5500_fg_acc_cur_timer_expired;
+ di->avg_current_timer.data = (unsigned long) di;
+ di->avg_current_timer.expires = 60 * HZ;
+ if (!timer_pending(&di->avg_current_timer))
+ add_timer(&di->avg_current_timer);
+ else
+ mod_timer(&di->avg_current_timer, 60 * HZ);
+
+ platform_set_drvdata(pdev, di);
+
+ /* Calibrate the fg the first time */
+ di->flags.calibrate = true;
+ di->calib_state = AB5500_FG_CALIB_INIT;
+ /* Run the FG algorithm */
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work,
+ FG_PERIODIC_START_INTERVAL);
+ queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work,
+ FG_PERIODIC_START_INTERVAL);
+
+ dev_info(di->dev, "probe success\n");
+ return ret;
+
+pow_unreg:
+ power_supply_unregister(&di->fg_psy);
+free_fg_wq:
+ destroy_workqueue(di->fg_wq);
+free_device_info:
+ kfree(di);
+
+ return ret;
+}
+
+static struct platform_driver ab5500_fg_driver = {
+ .probe = ab5500_fg_probe,
+ .remove = __devexit_p(ab5500_fg_remove),
+ .suspend = ab5500_fg_suspend,
+ .resume = ab5500_fg_resume,
+ .driver = {
+ .name = "ab5500-fg",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_fg_init(void)
+{
+ return platform_driver_register(&ab5500_fg_driver);
+}
+
+static void __exit ab5500_fg_exit(void)
+{
+ platform_driver_unregister(&ab5500_fg_driver);
+}
+
+subsys_initcall_sync(ab5500_fg_init);
+module_exit(ab5500_fg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
+MODULE_ALIAS("platform:ab5500-fg");
+MODULE_DESCRIPTION("AB5500 Fuel Gauge driver");
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index d8bb99394ac..5e5700cf24a 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -83,6 +83,7 @@ struct ab8500_btemp_ranges {
* @btemp_ranges: Battery temperature range structure
* @btemp_wq: Work queue for measuring the temperature periodically
* @btemp_periodic_work: Work for measuring the temperature periodically
+ * @initialized: True if battery id read.
*/
struct ab8500_btemp {
struct device *dev;
@@ -100,6 +101,7 @@ struct ab8500_btemp {
struct ab8500_btemp_ranges btemp_ranges;
struct workqueue_struct *btemp_wq;
struct delayed_work btemp_periodic_work;
+ bool initialized;
};
/* BTEMP power supply properties */
@@ -569,6 +571,13 @@ static void ab8500_btemp_periodic_work(struct work_struct *work)
struct ab8500_btemp *di = container_of(work,
struct ab8500_btemp, btemp_periodic_work.work);
+ if (!di->initialized) {
+ di->initialized = true;
+ /* Identify the battery */
+ if (ab8500_btemp_id(di) < 0)
+ dev_warn(di->dev, "failed to identify the battery\n");
+ }
+
di->bat_temp = ab8500_btemp_measure_temp(di);
if (di->bat_temp != di->prev_bat_temp) {
@@ -964,7 +973,7 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
{
int irq, i, ret = 0;
u8 val;
- struct abx500_bm_plat_data *plat_data;
+ struct ab8500_platform_data *plat_data;
struct ab8500_btemp *di =
kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
@@ -976,8 +985,10 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
di->parent = dev_get_drvdata(pdev->dev.parent);
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+ di->initialized = false;
+
/* get btemp specific platform data */
- plat_data = pdev->dev.platform_data;
+ plat_data = dev_get_platdata(di->parent->dev);
di->pdata = plat_data->btemp;
if (!di->pdata) {
dev_err(di->dev, "no btemp platform data supplied\n");
@@ -1017,10 +1028,6 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
ab8500_btemp_periodic_work);
- /* Identify the battery */
- if (ab8500_btemp_id(di) < 0)
- dev_warn(di->dev, "failed to identify the battery\n");
-
/* Set BTEMP thermal limits. Low and Med are fixed */
di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index e2b4accbec8..19f62729a0a 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -29,6 +29,7 @@
#include <linux/mfd/abx500/ab8500-gpadc.h>
#include <linux/mfd/abx500/ux500_chargalg.h>
#include <linux/usb/otg.h>
+#include <asm/mach-types.h>
/* Charger constants */
#define NO_PW_CONN 0
@@ -77,6 +78,9 @@
/* Lowest charger voltage is 3.39V -> 0x4E */
#define LOW_VOLT_REG 0x4E
+/* Step up/down delay in us */
+#define STEP_UDELAY 1000
+
/* UsbLineStatus register - usb types */
enum ab8500_charger_link_status {
USB_STAT_NOT_CONFIGURED,
@@ -934,6 +938,88 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
}
/**
+ * ab8500_charger_set_current() - set charger current
+ * @di: pointer to the ab8500_charger structure
+ * @ich: charger current, in mA
+ * @reg: select what charger register to set
+ *
+ * Set charger current.
+ * There is no state machine in the AB to step up/down the charger
+ * current to avoid dips and spikes on MAIN, VBUS and VBAT when
+ * charging is started. Instead we need to implement
+ * this charger current step-up/down here.
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_charger_set_current(struct ab8500_charger *di,
+ int ich, int reg)
+{
+ int ret, i;
+ int curr_index, prev_curr_index, shift_value;
+ u8 reg_value;
+
+ switch (reg) {
+ case AB8500_MCH_IPT_CURLVL_REG:
+ shift_value = MAIN_CH_INPUT_CURR_SHIFT;
+ curr_index = ab8500_current_to_regval(ich);
+ break;
+ case AB8500_USBCH_IPT_CRNTLVL_REG:
+ shift_value = VBUS_IN_CURR_LIM_SHIFT;
+ curr_index = ab8500_vbus_in_curr_to_regval(ich);
+ break;
+ case AB8500_CH_OPT_CRNTLVL_REG:
+ shift_value = 0;
+ curr_index = ab8500_current_to_regval(ich);
+ break;
+ default:
+ dev_err(di->dev, "%s current register not valid\n", __func__);
+ return -ENXIO;
+ }
+
+ if (curr_index < 0) {
+ dev_err(di->dev, "requested current limit out-of-range\n");
+ return -ENXIO;
+ }
+
+ ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ reg, &reg_value);
+ if (ret < 0) {
+ dev_err(di->dev, "%s read failed\n", __func__);
+ return ret;
+ }
+ prev_curr_index = (reg_value >> shift_value);
+
+ /* only update current if it's been changed */
+ if (prev_curr_index == curr_index)
+ return 0;
+
+ dev_dbg(di->dev, "%s set charger current: %d mA for reg: 0x%02x\n",
+ __func__, ich, reg);
+
+ if (prev_curr_index > curr_index) {
+ for (i = prev_curr_index - 1; i >= curr_index; i--) {
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER, reg, (u8) i << shift_value);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ usleep_range(STEP_UDELAY, STEP_UDELAY * 2);
+ }
+ } else {
+ for (i = prev_curr_index + 1; i <= curr_index; i++) {
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_CHARGER, reg, (u8) i << shift_value);
+ if (ret) {
+ dev_err(di->dev, "%s write failed\n", __func__);
+ return ret;
+ }
+ usleep_range(STEP_UDELAY, STEP_UDELAY * 2);
+ }
+ }
+ return ret;
+}
+
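The ramp logic above generalizes to the small sketch below (illustration only; write_reg() is a hypothetical stand-in for abx500_set_register_interruptible(), and the real function handles the equal-index case before stepping):

/* Step one register index at a time towards the target, settling between steps. */
static int example_current_ramp(int prev, int target, int shift,
				int (*write_reg)(u8 val))
{
	int dir = (target > prev) ? 1 : -1;
	int i, ret;

	for (i = prev + dir; i != target + dir; i += dir) {
		ret = write_reg((u8)i << shift);
		if (ret)
			return ret;
		usleep_range(STEP_UDELAY, STEP_UDELAY * 2);
	}
	return 0;
}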
+/**
* ab8500_charger_set_vbus_in_curr() - set VBUS input current limit
* @di: pointer to the ab8500_charger structure
* @ich_in: charger input current limit
@@ -944,8 +1030,6 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
int ich_in)
{
- int ret;
- int input_curr_index;
int min_value;
/* We should always use the lowest current limit */
@@ -964,19 +1048,38 @@ static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
break;
}
- input_curr_index = ab8500_vbus_in_curr_to_regval(min_value);
- if (input_curr_index < 0) {
- dev_err(di->dev, "VBUS input current limit too high\n");
- return -ENXIO;
- }
+ return ab8500_charger_set_current(di, min_value,
+ AB8500_USBCH_IPT_CRNTLVL_REG);
+}
- ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
- AB8500_USBCH_IPT_CRNTLVL_REG,
- input_curr_index << VBUS_IN_CURR_LIM_SHIFT);
- if (ret)
- dev_err(di->dev, "%s write failed\n", __func__);
+/**
+ * ab8500_charger_set_main_in_curr() - set main charger input current
+ * @di: pointer to the ab8500_charger structure
+ * @ich_in: input charger current, in mA
+ *
+ * Set main charger input current.
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_charger_set_main_in_curr(struct ab8500_charger *di,
+ int ich_in)
+{
+ return ab8500_charger_set_current(di, ich_in,
+ AB8500_MCH_IPT_CURLVL_REG);
+}
- return ret;
+/**
+ * ab8500_charger_set_output_curr() - set charger output current
+ * @di: pointer to the ab8500_charger structure
+ * @ich_out: output charger current, in mA
+ *
+ * Set charger output current.
+ * Returns error code in case of failure else 0 (on success)
+ */
+static int ab8500_charger_set_output_curr(struct ab8500_charger *di,
+ int ich_out)
+{
+ return ab8500_charger_set_current(di, ich_out,
+ AB8500_CH_OPT_CRNTLVL_REG);
}
/**
@@ -1088,18 +1191,19 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
return ret;
}
/* MainChInputCurr: current that can be drawn from the charger */
- ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
- AB8500_MCH_IPT_CURLVL_REG,
- input_curr_index << MAIN_CH_INPUT_CURR_SHIFT);
+ ret = ab8500_charger_set_main_in_curr(di,
+ di->bat->chg_params->ac_curr_max);
if (ret) {
- dev_err(di->dev, "%s write failed\n", __func__);
+ dev_err(di->dev, "%s Failed to set MainChInputCurr\n",
+ __func__);
return ret;
}
/* ChOutputCurrentLevel: protected output current */
- ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
- AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ ret = ab8500_charger_set_output_curr(di, iset);
if (ret) {
- dev_err(di->dev, "%s write failed\n", __func__);
+ dev_err(di->dev, "%s "
+ "Failed to set ChOutputCurentLevel\n",
+ __func__);
return ret;
}
@@ -1156,12 +1260,11 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
return ret;
}
- ret = abx500_set_register_interruptible(di->dev,
- AB8500_CHARGER,
- AB8500_CH_OPT_CRNTLVL_REG, CH_OP_CUR_LVL_0P1);
+ ret = ab8500_charger_set_output_curr(di, 0);
if (ret) {
- dev_err(di->dev,
- "%s write failed\n", __func__);
+ dev_err(di->dev, "%s "
+ "Failed to set ChOutputCurentLevel\n",
+ __func__);
return ret;
}
} else {
@@ -1264,10 +1367,11 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
return ret;
}
/* ChOutputCurrentLevel: protected output current */
- ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
- AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ ret = ab8500_charger_set_output_curr(di, ich_out);
if (ret) {
- dev_err(di->dev, "%s write failed\n", __func__);
+ dev_err(di->dev, "%s "
+ "Failed to set ChOutputCurentLevel\n",
+ __func__);
return ret;
}
/* Check if VBAT overshoot control should be enabled */
@@ -1364,7 +1468,6 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
int ich_out)
{
int ret;
- int curr_index;
struct ab8500_charger *di;
if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
@@ -1374,18 +1477,11 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
else
return -ENXIO;
- curr_index = ab8500_current_to_regval(ich_out);
- if (curr_index < 0) {
- dev_err(di->dev,
- "Charger current too high, "
- "charging not started\n");
- return -ENXIO;
- }
-
- ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
- AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+ ret = ab8500_charger_set_output_curr(di, ich_out);
if (ret) {
- dev_err(di->dev, "%s write failed\n", __func__);
+ dev_err(di->dev, "%s "
+ "Failed to set ChOutputCurentLevel\n",
+ __func__);
return ret;
}
@@ -2354,11 +2450,18 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
}
/* Backup battery voltage and current */
- ret = abx500_set_register_interruptible(di->dev,
- AB8500_RTC,
- AB8500_RTC_BACKUP_CHG_REG,
- di->bat->bkup_bat_v |
- di->bat->bkup_bat_i);
+ if (machine_is_snowball())
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_RTC,
+ AB8500_RTC_BACKUP_CHG_REG,
+ BUP_VCH_SEL_3P1V |
+ BUP_ICH_SEL_150UA);
+ else
+ ret = abx500_set_register_interruptible(di->dev,
+ AB8500_RTC,
+ AB8500_RTC_BACKUP_CHG_REG,
+ di->bat->bkup_bat_v |
+ di->bat->bkup_bat_i);
if (ret) {
dev_err(di->dev, "failed to setup backup battery charging\n");
goto out;
@@ -2534,7 +2637,7 @@ static int __devexit ab8500_charger_remove(struct platform_device *pdev)
static int __devinit ab8500_charger_probe(struct platform_device *pdev)
{
int irq, i, charger_status, ret = 0;
- struct abx500_bm_plat_data *plat_data;
+ struct ab8500_platform_data *plat_data;
struct ab8500_charger *di =
kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
@@ -2550,7 +2653,8 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
spin_lock_init(&di->usb_state.usb_lock);
/* get charger specific platform data */
- plat_data = pdev->dev.platform_data;
+ plat_data = dev_get_platdata(di->parent->dev);
+
di->pdata = plat_data->charger;
if (!di->pdata) {
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index c22f2f05657..798f5f7cef4 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -485,8 +485,9 @@ static int ab8500_fg_coulomb_counter(struct ab8500_fg *di, bool enable)
di->flags.fg_enabled = true;
} else {
/* Clear any pending read requests */
- ret = abx500_set_register_interruptible(di->dev,
- AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
+ AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+ (RESET_ACCU | READ_REQ), 0);
if (ret)
goto cc_err;
@@ -1404,8 +1405,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
sleep_time = di->bat->fg_params->init_timer;
/* Discard the first [x] seconds */
- if (di->init_cnt >
- di->bat->fg_params->init_discard_time) {
+ if (di->init_cnt > di->bat->fg_params->init_discard_time) {
ab8500_fg_calc_cap_discharge_voltage(di, true);
ab8500_fg_check_capacity_limits(di, true);
@@ -2446,7 +2446,7 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
{
int i, irq;
int ret = 0;
- struct abx500_bm_plat_data *plat_data;
+ struct ab8500_platform_data *plat_data;
struct ab8500_fg *di =
kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
@@ -2461,7 +2461,7 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
/* get fg specific platform data */
- plat_data = pdev->dev.platform_data;
+ plat_data = dev_get_platdata(di->parent->dev);
di->pdata = plat_data->fg;
if (!di->pdata) {
dev_err(di->dev, "no fg platform data supplied\n");
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
index 804b88c760d..032b27d35bf 100644
--- a/drivers/power/abx500_chargalg.c
+++ b/drivers/power/abx500_chargalg.c
@@ -220,6 +220,7 @@ enum maxim_ret {
*/
struct abx500_chargalg {
struct device *dev;
+ struct ab8500 *parent;
int charge_status;
int eoc_cnt;
int rch_cnt;
@@ -1802,7 +1803,7 @@ static int __devexit abx500_chargalg_remove(struct platform_device *pdev)
static int __devinit abx500_chargalg_probe(struct platform_device *pdev)
{
- struct abx500_bm_plat_data *plat_data;
+ struct ab8500_platform_data *plat_data;
int ret = 0;
struct abx500_chargalg *di =
@@ -1812,8 +1813,8 @@ static int __devinit abx500_chargalg_probe(struct platform_device *pdev)
/* get device struct */
di->dev = &pdev->dev;
-
- plat_data = pdev->dev.platform_data;
+ di->parent = dev_get_drvdata(pdev->dev.parent);
+ plat_data = dev_get_platdata(di->parent->dev);
di->pdata = plat_data->chargalg;
di->bat = plat_data->battery;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 36db5a441eb..7278f1e4141 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -247,6 +247,14 @@ config REGULATOR_AB8500
This driver supports the regulators found on the ST-Ericsson mixed
signal AB8500 PMIC
+config REGULATOR_AB8500_EXT
+ bool "ST-Ericsson AB8500 External Regulators"
+ depends on REGULATOR_AB8500
+ default y if REGULATOR_AB8500
+ help
+ This driver supports the external regulator controls found on the
+ ST-Ericsson mixed signal AB8500 PMIC
+
config REGULATOR_DBX500_PRCMU
bool
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 94b52745e95..b5b90fb232b 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
+obj-$(CONFIG_REGULATOR_AB8500_EXT) += ab8500-ext.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
diff --git a/drivers/regulator/ab5500.c b/drivers/regulator/ab5500.c
new file mode 100644
index 00000000000..99676f7ad8e
--- /dev/null
+++ b/drivers/regulator/ab5500.c
@@ -0,0 +1,650 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Based on ab3100.c.
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/regulator/ab5500.h>
+
+#define AB5500_LDO_VDIGMIC_ST 0x50
+
+#define AB5500_LDO_G_ST 0x78
+#define AB5500_LDO_G_PWR1 0x79
+#define AB5500_LDO_G_PWR0 0x7a
+
+#define AB5500_LDO_H_ST 0x7b
+#define AB5500_LDO_H_PWR1 0x7c
+#define AB5500_LDO_H_PWR0 0x7d
+
+#define AB5500_LDO_K_ST 0x7e
+#define AB5500_LDO_K_PWR1 0x7f
+#define AB5500_LDO_K_PWR0 0x80
+
+#define AB5500_LDO_L_ST 0x81
+#define AB5500_LDO_L_PWR1 0x82
+#define AB5500_LDO_L_PWR0 0x83
+
+/* In SIM bank */
+#define AB5500_SIM_SUP 0x14
+
+#define AB5500_MBIAS1 0x00
+#define AB5500_MBIAS2 0x01
+
+#define AB5500_LDO_MODE_MASK (0x3 << 4)
+#define AB5500_LDO_MODE_FULLPOWER (0x3 << 4)
+#define AB5500_LDO_MODE_PWRCTRL (0x2 << 4)
+#define AB5500_LDO_MODE_LOWPOWER (0x1 << 4)
+#define AB5500_LDO_MODE_OFF (0x0 << 4)
+#define AB5500_LDO_VOLT_MASK 0x07
+
+#define AB5500_MBIAS1_ENABLE (0x1 << 1)
+#define AB5500_MBIAS1_MODE_MASK (0x1 << 1)
+#define AB5500_MBIAS2_ENABLE (0x1 << 1)
+#define AB5500_MBIAS2_VOLT_MASK (0x1 << 2)
+#define AB5500_MBIAS2_MODE_MASK (0x1 << 1)
+
+struct ab5500_regulator {
+ struct regulator_desc desc;
+ const int *voltages;
+ int num_holes;
+ bool off_is_lowpower;
+ bool enabled;
+ int enable_time;
+ int load_lp_uA;
+ u8 bank;
+ u8 reg;
+ u8 mode;
+ u8 update_mask;
+ u8 update_val_idle;
+ u8 update_val_normal;
+ u8 voltage_mask;
+};
+
+struct ab5500_regulators {
+ struct device *dev;
+ struct ab5500_regulator *regulator[AB5500_NUM_REGULATORS];
+ struct regulator_dev *rdev[AB5500_NUM_REGULATORS];
+};
+
+static int ab5500_regulator_enable_time(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ return r->enable_time; /* microseconds */
+}
+
+static int ab5500_regulator_enable(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ int ret;
+
+ ret = abx500_mask_and_set(ab5500->dev, r->bank, r->reg,
+ r->update_mask, r->mode);
+ if (ret < 0)
+ return ret;
+
+ r->enabled = true;
+
+ return 0;
+}
+
+static int ab5500_regulator_disable(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ u8 regval;
+ int ret;
+
+ if (r->off_is_lowpower)
+ regval = AB5500_LDO_MODE_LOWPOWER;
+ else
+ regval = AB5500_LDO_MODE_OFF;
+
+ ret = abx500_mask_and_set(ab5500->dev, r->bank, r->reg,
+ r->update_mask, regval);
+ if (ret < 0)
+ return ret;
+
+ r->enabled = false;
+
+ return 0;
+}
+
+static unsigned int ab5500_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ if (r->mode == r->update_val_idle)
+ return REGULATOR_MODE_IDLE;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static unsigned int
+ab5500_regulator_get_optimum_mode(struct regulator_dev *rdev,
+ int input_uV, int output_uV, int load_uA)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ unsigned int mode;
+
+ if (load_uA <= r->load_lp_uA)
+ mode = REGULATOR_MODE_IDLE;
+ else
+ mode = REGULATOR_MODE_NORMAL;
+
+ return mode;
+}
+
+static int ab5500_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ r->mode = r->update_val_normal;
+ break;
+ case REGULATOR_MODE_IDLE:
+ r->mode = r->update_val_idle;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (r->enabled)
+ return ab5500_regulator_enable(rdev);
+
+ return 0;
+}
+
+static int ab5500_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ u8 regval;
+ int err;
+
+ err = abx500_get_register_interruptible(ab5500->dev,
+ r->bank, r->reg, &regval);
+ if (err) {
+ dev_err(rdev_get_dev(rdev), "unable to get register 0x%x\n",
+ r->reg);
+ return err;
+ }
+
+ switch (regval & r->update_mask) {
+ case AB5500_LDO_MODE_PWRCTRL:
+ case AB5500_LDO_MODE_OFF:
+ r->enabled = false;
+ break;
+ case AB5500_LDO_MODE_LOWPOWER:
+ if (r->off_is_lowpower) {
+ r->enabled = false;
+ break;
+ }
+ /* fall through */
+ default:
+ r->enabled = true;
+ break;
+ }
+
+ return r->enabled;
+}
+
+static int
+ab5500_regulator_list_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ unsigned n_voltages = r->desc.n_voltages;
+ int selindex;
+ int i;
+
+ for (i = 0, selindex = 0; selindex < n_voltages; i++) {
+ int voltage = r->voltages[i];
+
+ if (!voltage)
+ continue;
+
+ if (selindex == selector)
+ return voltage;
+
+ selindex++;
+ }
+
+ return -EINVAL;
+}
+
+static int ab5500_regulator_fixed_get_voltage(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+
+ return r->voltages[0];
+}
+
+static int ab5500_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ u8 regval;
+ int ret;
+
+ ret = abx500_get_register_interruptible(ab5500->dev,
+ r->bank, r->reg, &regval);
+ if (ret) {
+ dev_warn(rdev_get_dev(rdev),
+ "failed to get regulator value in register "
+ "%02x\n", r->reg);
+ return ret;
+ }
+
+ regval &= r->voltage_mask;
+ if (regval >= r->desc.n_voltages + r->num_holes)
+ return -EINVAL;
+
+ if (!r->voltages[regval])
+ return -EINVAL;
+
+ return r->voltages[regval];
+}
+
+static int ab5500_get_best_voltage_index(struct ab5500_regulator *r,
+ int min_uV, int max_uV)
+{
+ unsigned n_voltages = r->desc.n_voltages;
+ int bestmatch = INT_MAX;
+ int bestindex = -EINVAL;
+ int selindex;
+ int i;
+
+ /*
+ * Locate the minimum voltage fitting the criteria on
+ * this regulator. The switchable voltages are not
+ * in strict falling order so we need to check them
+ * all for the best match.
+ */
+ for (i = 0, selindex = 0; selindex < n_voltages; i++) {
+ int voltage = r->voltages[i];
+
+ if (!voltage)
+ continue;
+
+ if (voltage <= max_uV &&
+ voltage >= min_uV &&
+ voltage < bestmatch) {
+ bestmatch = voltage;
+ bestindex = i;
+ }
+
+ selindex++;
+ }
+
+ return bestindex;
+}
+
+static int ab5500_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev);
+ struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)];
+ int bestindex;
+
+ bestindex = ab5500_get_best_voltage_index(r, min_uV, max_uV);
+ if (bestindex < 0) {
+ dev_warn(rdev_get_dev(rdev),
+ "requested %d<=x<=%d uV, out of range!\n",
+ min_uV, max_uV);
+ return bestindex;
+ }
+
+ *selector = bestindex;
+
+ return abx500_mask_and_set_register_interruptible(ab5500->dev,
+ r->bank, r->reg, r->voltage_mask, bestindex);
+
+}
+
+static struct regulator_ops ab5500_regulator_variable_ops = {
+ .enable = ab5500_regulator_enable,
+ .disable = ab5500_regulator_disable,
+ .is_enabled = ab5500_regulator_is_enabled,
+ .enable_time = ab5500_regulator_enable_time,
+ .get_voltage = ab5500_regulator_get_voltage,
+ .set_voltage = ab5500_regulator_set_voltage,
+ .list_voltage = ab5500_regulator_list_voltage,
+ .set_mode = ab5500_regulator_set_mode,
+ .get_mode = ab5500_regulator_get_mode,
+ .get_optimum_mode = ab5500_regulator_get_optimum_mode,
+};
+
+static struct regulator_ops ab5500_regulator_fixed_ops = {
+ .enable = ab5500_regulator_enable,
+ .disable = ab5500_regulator_disable,
+ .is_enabled = ab5500_regulator_is_enabled,
+ .enable_time = ab5500_regulator_enable_time,
+ .get_voltage = ab5500_regulator_fixed_get_voltage,
+ .list_voltage = ab5500_regulator_list_voltage,
+ .set_mode = ab5500_regulator_set_mode,
+ .get_mode = ab5500_regulator_get_mode,
+ .get_optimum_mode = ab5500_regulator_get_optimum_mode,
+};
+
+static const int ab5500_ldo_lg_voltages[] = {
+ [0x00] = 1200000,
+ [0x01] = 0, /* not used */
+ [0x02] = 1500000,
+ [0x03] = 1800000,
+ [0x04] = 0, /* not used */
+ [0x05] = 2500000,
+ [0x06] = 2730000,
+ [0x07] = 2910000,
+};
+
+static const int ab5500_ldo_kh_voltages[] = {
+ [0x00] = 1200000,
+ [0x01] = 1500000,
+ [0x02] = 1800000,
+ [0x03] = 2100000,
+ [0x04] = 2500000,
+ [0x05] = 2750000,
+ [0x06] = 2790000,
+ [0x07] = 2910000,
+};
+
+static const int ab5500_ldo_vdigmic_voltages[] = {
+ [0x00] = 2100000,
+};
+
+static const int ab5500_ldo_sim_voltages[] = {
+ [0x00] = 1875000,
+ [0x01] = 2800000,
+ [0x02] = 2900000,
+};
+
+static const int ab5500_bias2_voltages[] = {
+ [0x00] = 2000000,
+ [0x01] = 2200000,
+};
+
+static const int ab5500_bias1_voltages[] = {
+ [0x00] = 2000000,
+};
+
+static struct ab5500_regulator ab5500_regulators[] = {
+ [AB5500_LDO_L] = {
+ .desc = {
+ .name = "LDO_L",
+ .id = AB5500_LDO_L,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_lg_voltages) -
+ 2,
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_L_ST,
+ .voltages = ab5500_ldo_lg_voltages,
+ .num_holes = 2, /* 2 register values unused */
+ .enable_time = 400,
+ .load_lp_uA = 20000,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_G] = {
+ .desc = {
+ .name = "LDO_G",
+ .id = AB5500_LDO_G,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_lg_voltages) -
+ 2,
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_G_ST,
+ .voltages = ab5500_ldo_lg_voltages,
+ .num_holes = 2, /* 2 register values unused */
+ .enable_time = 400,
+ .load_lp_uA = 20000,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_K] = {
+ .desc = {
+ .name = "LDO_K",
+ .id = AB5500_LDO_K,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_kh_voltages),
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_K_ST,
+ .voltages = ab5500_ldo_kh_voltages,
+ .enable_time = 400,
+ .load_lp_uA = 20000,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_H] = {
+ .desc = {
+ .name = "LDO_H",
+ .id = AB5500_LDO_H,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_kh_voltages),
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_H_ST,
+ .voltages = ab5500_ldo_kh_voltages,
+ .enable_time = 400,
+ .load_lp_uA = 20000,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_VDIGMIC] = {
+ .desc = {
+ .name = "LDO_VDIGMIC",
+ .id = AB5500_LDO_VDIGMIC,
+ .ops = &ab5500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages =
+ ARRAY_SIZE(ab5500_ldo_vdigmic_voltages),
+ },
+ .bank = AB5500_BANK_STARTUP,
+ .reg = AB5500_LDO_VDIGMIC_ST,
+ .voltages = ab5500_ldo_vdigmic_voltages,
+ .enable_time = 450,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_LDO_SIM] = {
+ .desc = {
+ .name = "LDO_SIM",
+ .id = AB5500_LDO_SIM,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_ldo_sim_voltages),
+ },
+ .bank = AB5500_BANK_SIM_USBSIM,
+ .reg = AB5500_SIM_SUP,
+ .voltages = ab5500_ldo_sim_voltages,
+ .enable_time = 1000,
+ .mode = AB5500_LDO_MODE_FULLPOWER,
+ .update_mask = AB5500_LDO_MODE_MASK,
+ .update_val_normal = AB5500_LDO_MODE_FULLPOWER,
+ .update_val_idle = AB5500_LDO_MODE_LOWPOWER,
+ .voltage_mask = AB5500_LDO_VOLT_MASK,
+ },
+ [AB5500_BIAS2] = {
+ .desc = {
+ .name = "MBIAS2",
+ .id = AB5500_BIAS2,
+ .ops = &ab5500_regulator_variable_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_bias2_voltages),
+ },
+ .bank = AB5500_BANK_AUDIO_HEADSETUSB,
+ .reg = AB5500_MBIAS2,
+ .voltages = ab5500_bias2_voltages,
+ .enable_time = 1000,
+ .mode = AB5500_MBIAS2_ENABLE,
+ .update_mask = AB5500_MBIAS2_MODE_MASK,
+ .update_val_normal = AB5500_MBIAS2_ENABLE,
+ .update_val_idle = AB5500_MBIAS2_ENABLE,
+ .voltage_mask = AB5500_MBIAS2_VOLT_MASK,
+ },
+ [AB5500_BIAS1] = {
+ .desc = {
+ .name = "MBIAS1",
+ .id = AB5500_BIAS1,
+ .ops = &ab5500_regulator_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ab5500_bias1_voltages),
+ },
+ .bank = AB5500_BANK_AUDIO_HEADSETUSB,
+ .reg = AB5500_MBIAS1,
+ .voltages = ab5500_bias1_voltages,
+ .enable_time = 1000,
+ .mode = AB5500_MBIAS1_ENABLE,
+ .update_mask = AB5500_MBIAS1_MODE_MASK,
+ .update_val_normal = AB5500_MBIAS1_ENABLE,
+ .update_val_idle = AB5500_MBIAS1_ENABLE,
+ },
+};
+
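As a concrete instance of the hole-skipping search in ab5500_get_best_voltage_index(): for LDO_L, a request for 1400000..1900000 uV skips the unused entries at 0x01 and 0x04 and lands on register index 0x02 (1500000 uV), the lowest in-range voltage. A minimal sketch (hypothetical helper, not part of the patch):

/* Illustration only: exercises the selector against the LDO_L table. */
static int example_ldo_l_best_index(void)
{
	struct ab5500_regulator *r = &ab5500_regulators[AB5500_LDO_L];

	return ab5500_get_best_voltage_index(r, 1400000, 1900000); /* 0x02 */
}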
+
+static int __devinit ab5500_regulator_probe(struct platform_device *pdev)
+{
+ struct ab5500_platform_data *ppdata = pdev->dev.parent->platform_data;
+ struct ab5500_regulator_platform_data *pdata = ppdata->regulator;
+ struct ab5500_regulator_data *regdata;
+ struct ab5500_regulators *ab5500;
+ int err = 0;
+ int i;
+
+ if (!pdata || !pdata->regulator)
+ return -EINVAL;
+
+ ab5500 = kzalloc(sizeof(*ab5500), GFP_KERNEL);
+ if (!ab5500)
+ return -ENOMEM;
+
+ ab5500->dev = &pdev->dev;
+ regdata = pdata->data;
+
+ platform_set_drvdata(pdev, ab5500);
+
+ for (i = 0; i < AB5500_NUM_REGULATORS; i++) {
+ struct ab5500_regulator *regulator = &ab5500_regulators[i];
+ struct regulator_dev *rdev;
+
+ if (regdata)
+ regulator->off_is_lowpower = regdata[i].off_is_lowpower;
+
+ ab5500->regulator[i] = regulator;
+
+ rdev = regulator_register(&regulator->desc, &pdev->dev,
+ &pdata->regulator[i], ab5500);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
+ dev_err(&pdev->dev, "failed to register regulator %s err %d\n",
+ regulator->desc.name, err);
+ goto err_unregister;
+ }
+
+ ab5500->rdev[i] = rdev;
+ }
+
+ return 0;
+
+err_unregister:
+ /* remove the already registered regulators */
+ while (--i >= 0)
+ regulator_unregister(ab5500->rdev[i]);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(ab5500);
+
+ return err;
+}
+
+static int __devexit ab5500_regulators_remove(struct platform_device *pdev)
+{
+ struct ab5500_regulators *ab5500 = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < AB5500_NUM_REGULATORS; i++)
+ regulator_unregister(ab5500->rdev[i]);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(ab5500);
+
+ return 0;
+}
+
+static struct platform_driver ab5500_regulator_driver = {
+ .driver = {
+ .name = "ab5500-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_regulator_probe,
+ .remove = __devexit_p(ab5500_regulators_remove),
+};
+
+static __init int ab5500_regulator_init(void)
+{
+ return platform_driver_register(&ab5500_regulator_driver);
+}
+
+static __exit void ab5500_regulator_exit(void)
+{
+ platform_driver_unregister(&ab5500_regulator_driver);
+}
+
+subsys_initcall(ab5500_regulator_init);
+module_exit(ab5500_regulator_exit);
+
+MODULE_DESCRIPTION("AB5500 Regulator Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ab5500-regulator");
diff --git a/drivers/regulator/ab8500-debug.c b/drivers/regulator/ab8500-debug.c
new file mode 100644
index 00000000000..f71cc26c135
--- /dev/null
+++ b/drivers/regulator/ab8500-debug.c
@@ -0,0 +1,2083 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson.
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/regulator/ab8500-debug.h>
+#include <linux/io.h>
+
+#include <mach/db8500-regs.h> /* U8500_BACKUPRAM1_BASE */
+#include <mach/hardware.h>
+
+#include "ab8500-debug.h"
+
+/* board profile address - to determine if suspend-force is default */
+#define BOOT_INFO_BACKUPRAM1 (U8500_BACKUPRAM1_BASE + 0xffc)
+#define BOARD_PROFILE_BACKUPRAM1 (0x3)
+
+/* board profile option */
+#define OPTION_BOARD_VERSION_V5X 50
+
+/* for error prints */
+static struct device *dev;
+static struct platform_device *pdev;
+
+/* setting for suspend force (disabled by default) */
+static bool setting_suspend_force;
+
+/*
+ * regulator states
+ */
+enum ab8500_regulator_state_id {
+ AB8500_REGULATOR_STATE_INIT,
+ AB8500_REGULATOR_STATE_SUSPEND,
+ AB8500_REGULATOR_STATE_SUSPEND_CORE,
+ AB8500_REGULATOR_STATE_RESUME_CORE,
+ AB8500_REGULATOR_STATE_RESUME,
+ AB8500_REGULATOR_STATE_CURRENT,
+ NUM_REGULATOR_STATE
+};
+
+static const char *regulator_state_name[NUM_REGULATOR_STATE] = {
+ [AB8500_REGULATOR_STATE_INIT] = "init",
+ [AB8500_REGULATOR_STATE_SUSPEND] = "suspend",
+ [AB8500_REGULATOR_STATE_SUSPEND_CORE] = "suspend-core",
+ [AB8500_REGULATOR_STATE_RESUME_CORE] = "resume-core",
+ [AB8500_REGULATOR_STATE_RESUME] = "resume",
+ [AB8500_REGULATOR_STATE_CURRENT] = "current",
+};
+
+/*
+ * regulator register definitions
+ */
+enum ab8500_register_id {
+ AB8500_REGU_NOUSE, /* if not defined */
+ AB8500_REGU_REQUEST_CTRL1,
+ AB8500_REGU_REQUEST_CTRL2,
+ AB8500_REGU_REQUEST_CTRL3,
+ AB8500_REGU_REQUEST_CTRL4,
+ AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ AB8500_REGU_HW_HP_REQ1_VALID1,
+ AB8500_REGU_HW_HP_REQ1_VALID2,
+ AB8500_REGU_HW_HP_REQ2_VALID1,
+ AB8500_REGU_HW_HP_REQ2_VALID2,
+ AB8500_REGU_SW_HP_REQ_VALID1,
+ AB8500_REGU_SW_HP_REQ_VALID2,
+ AB8500_REGU_SYSCLK_REQ1_VALID,
+ AB8500_REGU_SYSCLK_REQ2_VALID,
+ AB9540_REGU_VAUX4_REQ_VALID,
+ AB8500_REGU_MISC1,
+ AB8500_REGU_OTG_SUPPLY_CTRL,
+ AB8500_REGU_VUSB_CTRL,
+ AB8500_REGU_VAUDIO_SUPPLY,
+ AB8500_REGU_CTRL1_VAMIC,
+ AB8500_REGU_ARM_REGU1,
+ AB8500_REGU_ARM_REGU2,
+ AB8500_REGU_VAPE_REGU,
+ AB8500_REGU_VSMPS1_REGU,
+ AB8500_REGU_VSMPS2_REGU,
+ AB8500_REGU_VSMPS3_REGU,
+ AB8500_REGU_VPLL_VANA_REGU,
+ AB8500_REGU_VREF_DDR,
+ AB8500_REGU_EXT_SUPPLY_REGU,
+ AB8500_REGU_VAUX12_REGU,
+ AB8500_REGU_VRF1_VAUX3_REGU,
+ AB8500_REGU_VARM_SEL1,
+ AB8500_REGU_VARM_SEL2,
+ AB8500_REGU_VARM_SEL3,
+ AB8500_REGU_VAPE_SEL1,
+ AB8500_REGU_VAPE_SEL2,
+ AB8500_REGU_VAPE_SEL3,
+ AB9540_REGU_VAUX4_REQ_CTRL,
+ AB9540_REGU_VAUX4_REGU,
+ AB9540_REGU_VAUX4_SEL,
+ AB8500_REGU_VBB_SEL1,
+ AB8500_REGU_VBB_SEL2,
+ AB8500_REGU_VSMPS1_SEL1,
+ AB8500_REGU_VSMPS1_SEL2,
+ AB8500_REGU_VSMPS1_SEL3,
+ AB8500_REGU_VSMPS2_SEL1,
+ AB8500_REGU_VSMPS2_SEL2,
+ AB8500_REGU_VSMPS2_SEL3,
+ AB8500_REGU_VSMPS3_SEL1,
+ AB8500_REGU_VSMPS3_SEL2,
+ AB8500_REGU_VSMPS3_SEL3,
+ AB8500_REGU_VAUX1_SEL,
+ AB8500_REGU_VAUX2_SEL,
+ AB8500_REGU_VRF1_VAUX3_SEL,
+ AB8500_REGU_CTRL_EXT_SUP,
+ AB8500_REGU_VMOD_REGU,
+ AB8500_REGU_VMOD_SEL1,
+ AB8500_REGU_VMOD_SEL2,
+ AB8500_REGU_CTRL_DISCH,
+ AB8500_REGU_CTRL_DISCH2,
+ AB9540_REGU_CTRL_DISCH3,
+ AB8500_OTHER_SYSCLK_CTRL, /* Other */
+ AB8500_OTHER_VSIM_SYSCLK_CTRL, /* Other */
+ AB8500_OTHER_SYSULPCLK_CTRL1, /* Other */
+ NUM_AB8500_REGISTER
+};
+
+struct ab8500_register {
+ const char *name;
+ u8 bank;
+ u8 addr;
+ u8 unavailable; /* Used to flag when AB doesn't support a register */
+};
+
+static struct ab8500_register
+ ab8500_register[NUM_AB8500_REGISTER] = {
+ [AB8500_REGU_REQUEST_CTRL1] = {
+ .name = "ReguRequestCtrl1",
+ .bank = 0x03,
+ .addr = 0x03,
+ },
+ [AB8500_REGU_REQUEST_CTRL2] = {
+ .name = "ReguRequestCtrl2",
+ .bank = 0x03,
+ .addr = 0x04,
+ },
+ [AB8500_REGU_REQUEST_CTRL3] = {
+ .name = "ReguRequestCtrl3",
+ .bank = 0x03,
+ .addr = 0x05,
+ },
+ [AB8500_REGU_REQUEST_CTRL4] = {
+ .name = "ReguRequestCtrl4",
+ .bank = 0x03,
+ .addr = 0x06,
+ },
+ [AB8500_REGU_SYSCLK_REQ1_HP_VALID1] = {
+ .name = "ReguSysClkReq1HPValid",
+ .bank = 0x03,
+ .addr = 0x07,
+ },
+ [AB8500_REGU_SYSCLK_REQ1_HP_VALID2] = {
+ .name = "ReguSysClkReq1HPValid2",
+ .bank = 0x03,
+ .addr = 0x08,
+ },
+ [AB8500_REGU_HW_HP_REQ1_VALID1] = {
+ .name = "ReguHwHPReq1Valid1",
+ .bank = 0x03,
+ .addr = 0x09,
+ },
+ [AB8500_REGU_HW_HP_REQ1_VALID2] = {
+ .name = "ReguHwHPReq1Valid2",
+ .bank = 0x03,
+ .addr = 0x0a,
+ },
+ [AB8500_REGU_HW_HP_REQ2_VALID1] = {
+ .name = "ReguHwHPReq2Valid1",
+ .bank = 0x03,
+ .addr = 0x0b,
+ },
+ [AB8500_REGU_HW_HP_REQ2_VALID2] = {
+ .name = "ReguHwHPReq2Valid2",
+ .bank = 0x03,
+ .addr = 0x0c,
+ },
+ [AB8500_REGU_SW_HP_REQ_VALID1] = {
+ .name = "ReguSwHPReqValid1",
+ .bank = 0x03,
+ .addr = 0x0d,
+ },
+ [AB8500_REGU_SW_HP_REQ_VALID2] = {
+ .name = "ReguSwHPReqValid2",
+ .bank = 0x03,
+ .addr = 0x0e,
+ },
+ [AB8500_REGU_SYSCLK_REQ1_VALID] = {
+ .name = "ReguSysClkReqValid1",
+ .bank = 0x03,
+ .addr = 0x0f,
+ },
+ [AB8500_REGU_SYSCLK_REQ2_VALID] = {
+ .name = "ReguSysClkReqValid2",
+ .bank = 0x03,
+ .addr = 0x10,
+ },
+ [AB9540_REGU_VAUX4_REQ_VALID] = {
+ .name = "ReguVaux4ReqValid",
+ .bank = 0x03,
+ .addr = 0x11,
+ .unavailable = true, /* ab9540 register */
+ },
+ [AB8500_REGU_MISC1] = {
+ .name = "ReguMisc1",
+ .bank = 0x03,
+ .addr = 0x80,
+ },
+ [AB8500_REGU_OTG_SUPPLY_CTRL] = {
+ .name = "OTGSupplyCtrl",
+ .bank = 0x03,
+ .addr = 0x81,
+ },
+ [AB8500_REGU_VUSB_CTRL] = {
+ .name = "VusbCtrl",
+ .bank = 0x03,
+ .addr = 0x82,
+ },
+ [AB8500_REGU_VAUDIO_SUPPLY] = {
+ .name = "VaudioSupply",
+ .bank = 0x03,
+ .addr = 0x83,
+ },
+ [AB8500_REGU_CTRL1_VAMIC] = {
+ .name = "ReguCtrl1VAmic",
+ .bank = 0x03,
+ .addr = 0x84,
+ },
+ [AB8500_REGU_ARM_REGU1] = {
+ .name = "ArmRegu1",
+ .bank = 0x04,
+ .addr = 0x00,
+ },
+ [AB8500_REGU_ARM_REGU2] = {
+ .name = "ArmRegu2",
+ .bank = 0x04,
+ .addr = 0x01,
+ },
+ [AB8500_REGU_VAPE_REGU] = {
+ .name = "VapeRegu",
+ .bank = 0x04,
+ .addr = 0x02,
+ },
+ [AB8500_REGU_VSMPS1_REGU] = {
+ .name = "Vsmps1Regu",
+ .bank = 0x04,
+ .addr = 0x03,
+ },
+ [AB8500_REGU_VSMPS2_REGU] = {
+ .name = "Vsmps2Regu",
+ .bank = 0x04,
+ .addr = 0x04,
+ },
+ [AB8500_REGU_VSMPS3_REGU] = {
+ .name = "Vsmps3Regu",
+ .bank = 0x04,
+ .addr = 0x05,
+ },
+ [AB8500_REGU_VPLL_VANA_REGU] = {
+ .name = "VpllVanaRegu",
+ .bank = 0x04,
+ .addr = 0x06,
+ },
+ [AB8500_REGU_VREF_DDR] = {
+ .name = "VrefDDR",
+ .bank = 0x04,
+ .addr = 0x07,
+ },
+ [AB8500_REGU_EXT_SUPPLY_REGU] = {
+ .name = "ExtSupplyRegu",
+ .bank = 0x04,
+ .addr = 0x08,
+ },
+ [AB8500_REGU_VAUX12_REGU] = {
+ .name = "Vaux12Regu",
+ .bank = 0x04,
+ .addr = 0x09,
+ },
+ [AB8500_REGU_VRF1_VAUX3_REGU] = {
+ .name = "VRF1Vaux3Regu",
+ .bank = 0x04,
+ .addr = 0x0a,
+ },
+ [AB8500_REGU_VARM_SEL1] = {
+ .name = "VarmSel1",
+ .bank = 0x04,
+ .addr = 0x0b,
+ },
+ [AB8500_REGU_VARM_SEL2] = {
+ .name = "VarmSel2",
+ .bank = 0x04,
+ .addr = 0x0c,
+ },
+ [AB8500_REGU_VARM_SEL3] = {
+ .name = "VarmSel3",
+ .bank = 0x04,
+ .addr = 0x0d,
+ },
+ [AB8500_REGU_VAPE_SEL1] = {
+ .name = "VapeSel1",
+ .bank = 0x04,
+ .addr = 0x0e,
+ },
+ [AB8500_REGU_VAPE_SEL2] = {
+ .name = "VapeSel2",
+ .bank = 0x04,
+ .addr = 0x0f,
+ },
+ [AB8500_REGU_VAPE_SEL3] = {
+ .name = "VapeSel3",
+ .bank = 0x04,
+ .addr = 0x10,
+ },
+ [AB9540_REGU_VAUX4_REQ_CTRL] = {
+ .name = "Vaux4ReqCtrl",
+ .bank = 0x04,
+ .addr = 0x2d,
+ .unavailable = true, /* ab9540 register */
+ },
+ [AB9540_REGU_VAUX4_REGU] = {
+ .name = "Vaux4Regu",
+ .bank = 0x04,
+ .addr = 0x2e,
+ .unavailable = true, /* ab9540 register */
+ },
+ [AB9540_REGU_VAUX4_SEL] = {
+ .name = "Vaux4Sel",
+ .bank = 0x04,
+ .addr = 0x2f,
+ .unavailable = true, /* ab9540 register */
+ },
+ [AB8500_REGU_VBB_SEL1] = {
+ .name = "VBBSel1",
+ .bank = 0x04,
+ .addr = 0x11,
+ },
+ [AB8500_REGU_VBB_SEL2] = {
+ .name = "VBBSel2",
+ .bank = 0x04,
+ .addr = 0x12,
+ },
+ [AB8500_REGU_VSMPS1_SEL1] = {
+ .name = "Vsmps1Sel1",
+ .bank = 0x04,
+ .addr = 0x13,
+ },
+ [AB8500_REGU_VSMPS1_SEL2] = {
+ .name = "Vsmps1Sel2",
+ .bank = 0x04,
+ .addr = 0x14,
+ },
+ [AB8500_REGU_VSMPS1_SEL3] = {
+ .name = "Vsmps1Sel3",
+ .bank = 0x04,
+ .addr = 0x15,
+ },
+ [AB8500_REGU_VSMPS2_SEL1] = {
+ .name = "Vsmps2Sel1",
+ .bank = 0x04,
+ .addr = 0x17,
+ },
+ [AB8500_REGU_VSMPS2_SEL2] = {
+ .name = "Vsmps2Sel2",
+ .bank = 0x04,
+ .addr = 0x18,
+ },
+ [AB8500_REGU_VSMPS2_SEL3] = {
+ .name = "Vsmps2Sel3",
+ .bank = 0x04,
+ .addr = 0x19,
+ },
+ [AB8500_REGU_VSMPS3_SEL1] = {
+ .name = "Vsmps3Sel1",
+ .bank = 0x04,
+ .addr = 0x1b,
+ },
+ [AB8500_REGU_VSMPS3_SEL2] = {
+ .name = "Vsmps3Sel2",
+ .bank = 0x04,
+ .addr = 0x1c,
+ },
+ [AB8500_REGU_VSMPS3_SEL3] = {
+ .name = "Vsmps3Sel3",
+ .bank = 0x04,
+ .addr = 0x1d,
+ },
+ [AB8500_REGU_VAUX1_SEL] = {
+ .name = "Vaux1Sel",
+ .bank = 0x04,
+ .addr = 0x1f,
+ },
+ [AB8500_REGU_VAUX2_SEL] = {
+ .name = "Vaux2Sel",
+ .bank = 0x04,
+ .addr = 0x20,
+ },
+ [AB8500_REGU_VRF1_VAUX3_SEL] = {
+ .name = "VRF1Vaux3Sel",
+ .bank = 0x04,
+ .addr = 0x21,
+ },
+ [AB8500_REGU_CTRL_EXT_SUP] = {
+ .name = "ReguCtrlExtSup",
+ .bank = 0x04,
+ .addr = 0x22,
+ },
+ [AB8500_REGU_VMOD_REGU] = {
+ .name = "VmodRegu",
+ .bank = 0x04,
+ .addr = 0x40,
+ },
+ [AB8500_REGU_VMOD_SEL1] = {
+ .name = "VmodSel1",
+ .bank = 0x04,
+ .addr = 0x41,
+ },
+ [AB8500_REGU_VMOD_SEL2] = {
+ .name = "VmodSel2",
+ .bank = 0x04,
+ .addr = 0x42,
+ },
+ [AB8500_REGU_CTRL_DISCH] = {
+ .name = "ReguCtrlDisch",
+ .bank = 0x04,
+ .addr = 0x43,
+ },
+ [AB8500_REGU_CTRL_DISCH2] = {
+ .name = "ReguCtrlDisch2",
+ .bank = 0x04,
+ .addr = 0x44,
+ },
+ [AB9540_REGU_CTRL_DISCH3] = {
+ .name = "ReguCtrlDisch3",
+ .bank = 0x04,
+ .addr = 0x48,
+ .unavailable = true, /* ab9540 register */
+ },
+ /* Outside regulator banks */
+ [AB8500_OTHER_SYSCLK_CTRL] = {
+ .name = "SysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x0c,
+ },
+ [AB8500_OTHER_VSIM_SYSCLK_CTRL] = {
+ .name = "VsimSysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x33,
+ },
+ [AB8500_OTHER_SYSULPCLK_CTRL1] = {
+ .name = "SysUlpClkCtrl1",
+ .bank = 0x02,
+ .addr = 0x0b,
+ },
+};
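+
+/*
+ * Addressing example: "Vaux1Sel" above lives in bank 0x04 at address
+ * 0x1f, so its raw value is read with
+ * abx500_get_register_interruptible(dev, 0x04, 0x1f, &val).
+ */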
+
+struct ab9540_register_update {
+ /* Identity of register to be updated */
+ u8 bank;
+ u8 addr;
+ /* New value for unavailable flag */
+ u8 unavailable;
+};
+
+static const struct ab9540_register_update ab9540_update[] = {
+ /* AB8500 register which is not available on the AB9540 */
+ /* AB8500_REGU_VREF_DDR */
+ {
+ .bank = 0x04,
+ .addr = 0x07,
+ .unavailable = true,
+ },
+
+ /* Registers which are not available on the AB8500 but exist on
+ * the AB9540. */
+ /* AB9540_REGU_VAUX4_REQ_VALID */
+ {
+ .bank = 0x03,
+ .addr = 0x11,
+ },
+ /* AB9540_REGU_VAUX4_REQ_CTRL */
+ {
+ .bank = 0x04,
+ .addr = 0x2d,
+ },
+ /* AB9540_REGU_VAUX4_REGU */
+ {
+ .bank = 0x04,
+ .addr = 0x2e,
+ },
+ /* AB9540_REGU_VAUX4_SEL */
+ {
+ .bank = 0x04,
+ .addr = 0x2f,
+ },
+ /* AB9540_REGU_CTRL_DISCH3 */
+ {
+ .bank = 0x04,
+ .addr = 0x48,
+ },
+};
+
+static void ab9540_registers_update(void)
+{
+ int i;
+ int j;
+
+ for (i = 0; i < NUM_AB8500_REGISTER; i++)
+ for (j = 0; j < ARRAY_SIZE(ab9540_update); j++)
+ if (ab8500_register[i].bank == ab9540_update[j].bank &&
+ ab8500_register[i].addr == ab9540_update[j].addr) {
+ ab8500_register[i].unavailable =
+ ab9540_update[j].unavailable;
+ break;
+ }
+}
+
+static u8 ab8500_register_state[NUM_REGULATOR_STATE][NUM_AB8500_REGISTER];
+static bool ab8500_register_state_saved[NUM_REGULATOR_STATE];
+static bool ab8500_register_state_save = true;
+
+static int ab8500_regulator_record_state(int state)
+{
+ u8 val;
+ int i;
+ int ret;
+
+ /* check arguments */
+ if ((state >= NUM_REGULATOR_STATE) || (state < 0)) {
+ dev_err(dev, "Invalid state specified\n");
+ return -EINVAL;
+ }
+
+ /* record */
+ if (!ab8500_register_state_save)
+ goto exit;
+
+ ab8500_register_state_saved[state] = true;
+
+ for (i = 1; i < NUM_AB8500_REGISTER; i++) {
+ if (ab8500_register[i].unavailable)
+ continue;
+
+ ret = abx500_get_register_interruptible(dev,
+ ab8500_register[i].bank,
+ ab8500_register[i].addr,
+ &val);
+ if (ret < 0) {
+ dev_err(dev, "abx500_get_reg fail %d, %d\n",
+ ret, __LINE__);
+ return -EINVAL;
+ }
+
+ ab8500_register_state[state][i] = val;
+ }
+exit:
+ return 0;
+}
+
+/*
+ * regulator register dump
+ */
+static int ab8500_regulator_dump_print(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ int state, reg_id, i;
+ int err;
+
+ /* record current state */
+ ab8500_regulator_record_state(AB8500_REGULATOR_STATE_CURRENT);
+
+ /* print dump header */
+ err = seq_printf(s, "ab8500-regulator dump:\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ /* print states */
+ for (state = NUM_REGULATOR_STATE - 1; state >= 0; state--) {
+ if (ab8500_register_state_saved[state])
+ err = seq_printf(s, "%16s saved -------",
+ regulator_state_name[state]);
+ else
+ err = seq_printf(s, "%12s not saved -------",
+ regulator_state_name[state]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+ for (i = 0; i < NUM_REGULATOR_STATE; i++) {
+ if (i < state)
+ err = seq_printf(s, "-----");
+ else if (i == state)
+ err = seq_printf(s, "----+");
+ else
+ err = seq_printf(s, "    |");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n",
+ __LINE__);
+ }
+ err = seq_printf(s, "\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ }
+
+ /* print labels */
+ err = seq_printf(s, "\n                       addr\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+ /* dump registers */
+ for (reg_id = 1; reg_id < NUM_AB8500_REGISTER; reg_id++) {
+ if (ab8500_register[reg_id].unavailable)
+ continue;
+
+ err = seq_printf(s, "%22s 0x%02x%02x:",
+ ab8500_register[reg_id].name,
+ ab8500_register[reg_id].bank,
+ ab8500_register[reg_id].addr);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ reg_id, __LINE__);
+
+ for (state = 0; state < NUM_REGULATOR_STATE; state++) {
+ err = seq_printf(s, " 0x%02x",
+ ab8500_register_state[state][reg_id]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ reg_id, __LINE__);
+ }
+
+ err = seq_printf(s, "\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ reg_id, __LINE__);
+ }
+
+ return 0;
+}
+
+static int ab8500_regulator_dump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_regulator_dump_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_regulator_dump_fops = {
+ .open = ab8500_regulator_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * regulator_voltage
+ */
+struct regulator_volt {
+ u8 value;
+ int volt;
+};
+
+struct regulator_volt_range {
+ struct regulator_volt start;
+ struct regulator_volt step;
+ struct regulator_volt end;
+};
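+
+/*
+ * A range decodes a register value v (start.value <= v <= end.value) as
+ * start.volt + ((v - start.value) / step.value) * step.volt; a
+ * step.value of 0 marks a single, saturated setting. For instance, in
+ * ab8500_varm_vsel below, 0x10 decodes to 700000 + 16 * 12500 =
+ * 900000 uV.
+ */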
+
+/*
+ * ab8500_regulator
+ * @name
+ * @update_regid
+ * @update_mask
+ * @update_val[4] {off, on, hw, lp}
+ * @hw_mode_regid
+ * @hw_mode_mask
+ * @hw_mode_val[4] {hp/lp, hp/off, hp, hp}
+ * @hw_valid_regid[4] {sysclkreq1, hw1, hw2, sw}
+ * @hw_valid_mask[4] {sysclkreq1, hw1, hw2, sw}
+ * @vsel_sel_regid
+ * @vsel_sel_mask
+ * @vsel_sel_val[4] {sel1, sel2, sel3, sel3}
+ * @vsel_regid[3]
+ * @vsel_mask[3]
+ * @vsel_range[3]
+ * @vsel_range_len[3]
+ * @unavailable {true/false depending on whether AB supports the regulator}
+ */
+struct ab8500_regulator {
+ const char *name;
+ int update_regid;
+ u8 update_mask;
+ u8 update_val[4];
+ int hw_mode_regid;
+ u8 hw_mode_mask;
+ u8 hw_mode_val[4];
+ int hw_valid_regid[4];
+ u8 hw_valid_mask[4];
+ int vsel_sel_regid;
+ u8 vsel_sel_mask;
+ u8 vsel_sel_val[4];
+ int vsel_regid[3];
+ u8 vsel_mask[3];
+ struct regulator_volt_range const *vsel_range[3];
+ int vsel_range_len[3];
+ u8 unavailable;
+};
+
+static const char *update_val_name[] = {
+ "off",
+ "on ",
+ "hw ",
+ "lp ",
+ " - " /* undefined value */
+};
+
+static const char *hw_mode_val_name[] = {
+ "hp/lp ",
+ "hp/off",
+ "hp ",
+ "hp ",
+ "-/- ", /* undefined value */
+};
+
+/* voltage selection */
+/* AB8500 device - Varm_vsel in 12.5mV steps */
+#define AB8500_VARM_VSEL_MASK 0x3f
+static const struct regulator_volt_range ab8500_varm_vsel[] = {
+ { {0x00, 700000}, {0x01, 12500}, {0x35, 1362500} },
+ { {0x36, 1362500}, {0x01, 0}, {0x3f, 1362500} },
+};
+
+/* AB9540 device - Varm_vsel in 6.25mV steps */
+#define AB9540_VARM_VSEL_MASK 0x7f
+static const struct regulator_volt_range ab9540_varm_vsel[] = {
+ { {0x00, 600000}, {0x01, 6250}, {0x7f, 1393750} },
+};
+
+static const struct regulator_volt_range vape_vmod_vsel[] = {
+ { {0x00, 700000}, {0x01, 12500}, {0x35, 1362500} },
+ { {0x36, 1362500}, {0x01, 0}, {0x3f, 1362500} },
+};
+
+/* AB8500 device - Vbbp_vsel and Vbbn_sel in 100mV steps */
+static const struct regulator_volt_range ab8500_vbbp_vsel[] = {
+ { {0x00, 0}, {0x10, 100000}, {0x40, 400000} },
+ { {0x50, 400000}, {0x10, 0}, {0x70, 400000} },
+ { {0x80, -400000}, {0x10, 0}, {0xb0, -400000} },
+ { {0xc0, -400000}, {0x10, 100000}, {0xf0, -100000} },
+};
+
+static const struct regulator_volt_range ab8500_vbbn_vsel[] = {
+ { {0x00, 0}, {0x01, -100000}, {0x04, -400000} },
+ { {0x05, -400000}, {0x01, 0}, {0x07, -400000} },
+ { {0x08, 0}, {0x01, 100000}, {0x0c, 400000} },
+ { {0x0d, 400000}, {0x01, 0}, {0x0f, 400000} },
+};
+
+/* AB9540 device - Vbbp_vsel and Vbbn_sel in 50mV steps */
+static const struct regulator_volt_range ab9540_vbbp_vsel[] = {
+ { {0x00, 0}, {0x10, -50000}, {0x70, -350000} },
+ { {0x80, 50000}, {0x10, 50000}, {0xf0, 400000} },
+};
+
+static const struct regulator_volt_range ab9540_vbbn_vsel[] = {
+ { {0x00, 0}, {0x01, -50000}, {0x07, -350000} },
+ { {0x08, 50000}, {0x01, 50000}, {0x0f, 400000} },
+};
+
+static const struct regulator_volt_range vsmps1_vsel[] = {
+ { {0x00, 1100000}, {0x01, 0}, {0x1f, 1100000} },
+ { {0x20, 1100000}, {0x01, 12500}, {0x30, 1300000} },
+ { {0x31, 1300000}, {0x01, 0}, {0x3f, 1300000} },
+};
+
+static const struct regulator_volt_range vsmps2_vsel[] = {
+ { {0x00, 1800000}, {0x01, 0}, {0x38, 1800000} },
+ { {0x39, 1800000}, {0x01, 12500}, {0x7f, 1875000} },
+};
+
+static const struct regulator_volt_range vsmps3_vsel[] = {
+ { {0x00, 700000}, {0x01, 12500}, {0x35, 1362500} },
+ { {0x36, 1362500}, {0x01, 0}, {0x7f, 1362500} },
+};
+
+/* for Vaux1, Vaux2 and Vaux4 */
+static const struct regulator_volt_range vauxn_vsel[] = {
+ { {0x00, 1100000}, {0x01, 100000}, {0x04, 1500000} },
+ { {0x05, 1800000}, {0x01, 50000}, {0x07, 1900000} },
+ { {0x08, 2500000}, {0x01, 0}, {0x08, 2500000} },
+ { {0x09, 2650000}, {0x01, 50000}, {0x0c, 2800000} },
+ { {0x0d, 2900000}, {0x01, 100000}, {0x0e, 3000000} },
+ { {0x0f, 3300000}, {0x01, 0}, {0x0f, 3300000} },
+};
+
+static const struct regulator_volt_range vaux3_vsel[] = {
+ { {0x00, 1200000}, {0x01, 300000}, {0x03, 2100000} },
+ { {0x04, 2500000}, {0x01, 250000}, {0x05, 2750000} },
+ { {0x06, 2790000}, {0x01, 0}, {0x06, 2790000} },
+ { {0x07, 2910000}, {0x01, 0}, {0x07, 2910000} },
+};
+
+static const struct regulator_volt_range vrf1_vsel[] = {
+ { {0x00, 1800000}, {0x10, 200000}, {0x10, 2000000} },
+ { {0x20, 2150000}, {0x10, 0}, {0x20, 2150000} },
+ { {0x30, 2500000}, {0x10, 0}, {0x30, 2500000} },
+};
+
+static const struct regulator_volt_range vintcore12_vsel[] = {
+ { {0x00, 1200000}, {0x08, 25000}, {0x30, 1350000} },
+ { {0x38, 1350000}, {0x01, 0}, {0x38, 1350000} },
+};
+
+/* regulators */
+static struct ab8500_regulator ab8500_regulator[AB8500_NUM_REGULATORS] = {
+ [AB8500_VARM] = {
+ .name = "Varm",
+ .update_regid = AB8500_REGU_ARM_REGU1,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x02,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x02,
+ .vsel_sel_regid = AB8500_REGU_ARM_REGU1,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VARM_SEL1,
+ .vsel_mask[0] = AB8500_VARM_VSEL_MASK,
+ .vsel_range[0] = ab8500_varm_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(ab8500_varm_vsel),
+ .vsel_regid[1] = AB8500_REGU_VARM_SEL2,
+ .vsel_mask[1] = AB8500_VARM_VSEL_MASK,
+ .vsel_range[1] = ab8500_varm_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(ab8500_varm_vsel),
+ .vsel_regid[2] = AB8500_REGU_VARM_SEL3,
+ .vsel_mask[2] = AB8500_VARM_VSEL_MASK,
+ .vsel_range[2] = ab8500_varm_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(ab8500_varm_vsel),
+ },
+ [AB8500_VBBP] = {
+ .name = "Vbbp",
+ .update_regid = AB8500_REGU_ARM_REGU2,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x00},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x04,
+ .vsel_sel_regid = AB8500_REGU_ARM_REGU1,
+ .vsel_sel_mask = 0x10,
+ .vsel_sel_val = {0x00, 0x10, 0x00, 0x00},
+ .vsel_regid[0] = AB8500_REGU_VBB_SEL1,
+ .vsel_mask[0] = 0xf0,
+ .vsel_range[0] = ab8500_vbbp_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(ab8500_vbbp_vsel),
+ .vsel_regid[1] = AB8500_REGU_VBB_SEL2,
+ .vsel_mask[1] = 0xf0,
+ .vsel_range[1] = ab8500_vbbp_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(ab8500_vbbp_vsel),
+ },
+ [AB8500_VBBN] = {
+ .name = "Vbbn",
+ .update_regid = AB8500_REGU_ARM_REGU2,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x00},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x04,
+ .vsel_sel_regid = AB8500_REGU_ARM_REGU1,
+ .vsel_sel_mask = 0x20,
+ .vsel_sel_val = {0x00, 0x20, 0x00, 0x00},
+ .vsel_regid[0] = AB8500_REGU_VBB_SEL1,
+ .vsel_mask[0] = 0x0f,
+ .vsel_range[0] = ab8500_vbbn_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(ab8500_vbbn_vsel),
+ .vsel_regid[1] = AB8500_REGU_VBB_SEL2,
+ .vsel_mask[1] = 0x0f,
+ .vsel_range[1] = ab8500_vbbn_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(ab8500_vbbn_vsel),
+ },
+ [AB8500_VAPE] = {
+ .name = "Vape",
+ .update_regid = AB8500_REGU_VAPE_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0x0c,
+ .hw_mode_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x01,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x01,
+ .vsel_sel_regid = AB8500_REGU_VAPE_REGU,
+ .vsel_sel_mask = 0x24,
+ .vsel_sel_val = {0x00, 0x04, 0x20, 0x24},
+ .vsel_regid[0] = AB8500_REGU_VAPE_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = vape_vmod_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vape_vmod_vsel),
+ .vsel_regid[1] = AB8500_REGU_VAPE_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = vape_vmod_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vape_vmod_vsel),
+ .vsel_regid[2] = AB8500_REGU_VAPE_SEL3,
+ .vsel_mask[2] = 0x3f,
+ .vsel_range[2] = vape_vmod_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(vape_vmod_vsel),
+ },
+ [AB8500_VSMPS1] = {
+ .name = "Vsmps1",
+ .update_regid = AB8500_REGU_VSMPS1_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0x30,
+ .hw_mode_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x01,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x01,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x01,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x04,
+ .vsel_sel_regid = AB8500_REGU_VSMPS1_REGU,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VSMPS1_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = vsmps1_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vsmps1_vsel),
+ .vsel_regid[1] = AB8500_REGU_VSMPS1_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = vsmps1_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vsmps1_vsel),
+ .vsel_regid[2] = AB8500_REGU_VSMPS1_SEL3,
+ .vsel_mask[2] = 0x3f,
+ .vsel_range[2] = vsmps1_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(vsmps1_vsel),
+ },
+ [AB8500_VSMPS2] = {
+ .name = "Vsmps2",
+ .update_regid = AB8500_REGU_VSMPS2_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x02,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x02,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x02,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x08,
+ .vsel_sel_regid = AB8500_REGU_VSMPS2_REGU,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VSMPS2_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = vsmps2_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vsmps2_vsel),
+ .vsel_regid[1] = AB8500_REGU_VSMPS2_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = vsmps2_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vsmps2_vsel),
+ .vsel_regid[2] = AB8500_REGU_VSMPS2_SEL3,
+ .vsel_mask[2] = 0x3f,
+ .vsel_range[2] = vsmps2_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(vsmps2_vsel),
+ },
+ [AB8500_VSMPS3] = {
+ .name = "Vsmps3",
+ .update_regid = AB8500_REGU_VSMPS3_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x04,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x04,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x04,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x10,
+ .vsel_sel_regid = AB8500_REGU_VSMPS3_REGU,
+ .vsel_sel_mask = 0x0c,
+ .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VSMPS3_SEL1,
+ .vsel_mask[0] = 0x7f,
+ .vsel_range[0] = vsmps3_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vsmps3_vsel),
+ .vsel_regid[1] = AB8500_REGU_VSMPS3_SEL2,
+ .vsel_mask[1] = 0x7f,
+ .vsel_range[1] = vsmps3_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vsmps3_vsel),
+ .vsel_regid[2] = AB8500_REGU_VSMPS3_SEL3,
+ .vsel_mask[2] = 0x7f,
+ .vsel_range[2] = vsmps3_vsel,
+ .vsel_range_len[2] = ARRAY_SIZE(vsmps3_vsel),
+ },
+ [AB8500_VPLL] = {
+ .name = "Vpll",
+ .update_regid = AB8500_REGU_VPLL_VANA_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0x0c,
+ .hw_mode_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x10,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x10,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x10,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x40,
+ },
+ [AB8500_VREFDDR] = {
+ .name = "VrefDDR",
+ .update_regid = AB8500_REGU_VREF_DDR,
+ .update_mask = 0x01,
+ .update_val = {0x00, 0x01, 0x00, 0x00},
+ },
+ [AB8500_VMOD] = {
+ .name = "Vmod",
+ .update_regid = AB8500_REGU_VMOD_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_VMOD_REGU,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x08,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x08,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x08,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x20,
+ .vsel_sel_regid = AB8500_REGU_VMOD_REGU,
+ .vsel_sel_mask = 0x04,
+ .vsel_sel_val = {0x00, 0x04, 0x00, 0x00},
+ .vsel_regid[0] = AB8500_REGU_VMOD_SEL1,
+ .vsel_mask[0] = 0x3f,
+ .vsel_range[0] = vape_vmod_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vape_vmod_vsel),
+ .vsel_regid[1] = AB8500_REGU_VMOD_SEL2,
+ .vsel_mask[1] = 0x3f,
+ .vsel_range[1] = vape_vmod_vsel,
+ .vsel_range_len[1] = ARRAY_SIZE(vape_vmod_vsel),
+ },
+ [AB8500_VEXTSUPPLY1] = {
+ .name = "Vextsupply1",
+ .update_regid = AB8500_REGU_EXT_SUPPLY_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x10,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x01,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x01,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x04,
+ },
+ [AB8500_VEXTSUPPLY2] = {
+ .name = "VextSupply2",
+ .update_regid = AB8500_REGU_EXT_SUPPLY_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x20,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x02,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x02,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x08,
+ },
+ [AB8500_VEXTSUPPLY3] = {
+ .name = "VextSupply3",
+ .update_regid = AB8500_REGU_EXT_SUPPLY_REGU,
+ .update_mask = 0x30,
+ .update_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0x0c,
+ .hw_mode_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2,
+ .hw_valid_mask[0] = 0x40,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2,
+ .hw_valid_mask[1] = 0x04,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2,
+ .hw_valid_mask[2] = 0x04,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x10,
+ },
+ [AB8500_VRF1] = {
+ .name = "Vrf1",
+ .update_regid = AB8500_REGU_VRF1_VAUX3_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .vsel_regid[0] = AB8500_REGU_VRF1_VAUX3_SEL,
+ .vsel_mask[0] = 0x30,
+ .vsel_range[0] = vrf1_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vrf1_vsel),
+ },
+ [AB8500_VANA] = {
+ .name = "Vana",
+ .update_regid = AB8500_REGU_VPLL_VANA_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2,
+ .hw_mode_mask = 0x30,
+ .hw_mode_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x08,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x08,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x08,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x20,
+ },
+ [AB8500_VAUX1] = {
+ .name = "Vaux1",
+ .update_regid = AB8500_REGU_VAUX12_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0x30,
+ .hw_mode_val = {0x00, 0x10, 0x20, 0x30},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x20,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x20,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x20,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1,
+ .hw_valid_mask[3] = 0x80,
+ .vsel_regid[0] = AB8500_REGU_VAUX1_SEL,
+ .vsel_mask[0] = 0x0f,
+ .vsel_range[0] = vauxn_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vauxn_vsel),
+ },
+ [AB8500_VAUX2] = {
+ .name = "Vaux2",
+ .update_regid = AB8500_REGU_VAUX12_REGU,
+ .update_mask = 0x0c,
+ .update_val = {0x00, 0x04, 0x08, 0x0c},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3,
+ .hw_mode_mask = 0xc0,
+ .hw_mode_val = {0x00, 0x40, 0x80, 0xc0},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x40,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x40,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x40,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x01,
+ .vsel_regid[0] = AB8500_REGU_VAUX2_SEL,
+ .vsel_mask[0] = 0x0f,
+ .vsel_range[0] = vauxn_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vauxn_vsel),
+ },
+ [AB8500_VAUX3] = {
+ .name = "Vaux3",
+ .update_regid = AB8500_REGU_VRF1_VAUX3_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB8500_REGU_REQUEST_CTRL4,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1,
+ .hw_valid_mask[0] = 0x80,
+ .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1,
+ .hw_valid_mask[1] = 0x80,
+ .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1,
+ .hw_valid_mask[2] = 0x80,
+ .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2,
+ .hw_valid_mask[3] = 0x02,
+ .vsel_regid[0] = AB8500_REGU_VRF1_VAUX3_SEL,
+ .vsel_mask[0] = 0x07,
+ .vsel_range[0] = vaux3_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vaux3_vsel),
+ },
+ [AB9540_VAUX4] = {
+ .name = "Vaux4",
+ .update_regid = AB9540_REGU_VAUX4_REGU,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_mode_regid = AB9540_REGU_VAUX4_REQ_CTRL,
+ .hw_mode_mask = 0x03,
+ .hw_mode_val = {0x00, 0x01, 0x02, 0x03},
+ .hw_valid_regid[0] = AB9540_REGU_VAUX4_REQ_VALID,
+ .hw_valid_mask[0] = 0x08,
+ .hw_valid_regid[1] = AB9540_REGU_VAUX4_REQ_VALID,
+ .hw_valid_mask[1] = 0x04,
+ .hw_valid_regid[2] = AB9540_REGU_VAUX4_REQ_VALID,
+ .hw_valid_mask[2] = 0x02,
+ .hw_valid_regid[3] = AB9540_REGU_VAUX4_REQ_VALID,
+ .hw_valid_mask[3] = 0x01,
+ .vsel_regid[0] = AB9540_REGU_VAUX4_SEL,
+ .vsel_mask[0] = 0x0f,
+ .vsel_range[0] = vauxn_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vauxn_vsel),
+ .unavailable = true, /* AB9540 regulator */
+ },
+ [AB8500_VINTCORE] = {
+ .name = "VintCore12",
+ .update_regid = AB8500_REGU_MISC1,
+ .update_mask = 0x44,
+ .update_val = {0x00, 0x04, 0x00, 0x44},
+ .vsel_regid[0] = AB8500_REGU_MISC1,
+ .vsel_mask[0] = 0x38,
+ .vsel_range[0] = vintcore12_vsel,
+ .vsel_range_len[0] = ARRAY_SIZE(vintcore12_vsel),
+ },
+ [AB8500_VTVOUT] = {
+ .name = "VTVout",
+ .update_regid = AB8500_REGU_MISC1,
+ .update_mask = 0x82,
+ .update_val = {0x00, 0x02, 0x00, 0x82},
+ },
+ [AB8500_VAUDIO] = {
+ .name = "Vaudio",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x02,
+ .update_val = {0x00, 0x02, 0x00, 0x00},
+ },
+ [AB8500_VANAMIC1] = {
+ .name = "Vanamic1",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x08,
+ .update_val = {0x00, 0x08, 0x00, 0x00},
+ },
+ [AB8500_VANAMIC2] = {
+ .name = "Vanamic2",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x10,
+ .update_val = {0x00, 0x10, 0x00, 0x00},
+ },
+ [AB8500_VDMIC] = {
+ .name = "Vdmic",
+ .update_regid = AB8500_REGU_VAUDIO_SUPPLY,
+ .update_mask = 0x04,
+ .update_val = {0x00, 0x04, 0x00, 0x00},
+ },
+ [AB8500_VUSB] = {
+ .name = "Vusb",
+ .update_regid = AB8500_REGU_VUSB_CTRL,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x00, 0x03},
+ },
+ [AB8500_VOTG] = {
+ .name = "VOTG",
+ .update_regid = AB8500_REGU_OTG_SUPPLY_CTRL,
+ .update_mask = 0x03,
+ .update_val = {0x00, 0x01, 0x00, 0x03},
+ },
+ [AB8500_VBUSBIS] = {
+ .name = "Vbusbis",
+ .update_regid = AB8500_REGU_OTG_SUPPLY_CTRL,
+ .update_mask = 0x08,
+ .update_val = {0x00, 0x08, 0x00, 0x00},
+ },
+};
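+
+/*
+ * Reading the table: for Varm, ArmRegu1 bits [1:0] hold the manual mode
+ * (off/on/hw/lp), ArmRegu1 bits [3:2] select which of VarmSel1-3 is
+ * active, and the selected VarmSelN register carries the voltage code
+ * decoded through ab8500_varm_vsel.
+ */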
+
+static void ab9540_regulators_update(void)
+{
+ /* Update unavailable regulators */
+ ab8500_regulator[AB8500_VREFDDR].unavailable = true;
+ ab8500_regulator[AB9540_VAUX4].unavailable = false;
+
+ /* Update regulator characteristics for AB9540 */
+ ab8500_regulator[AB8500_VARM].vsel_mask[0] = AB9540_VARM_VSEL_MASK;
+ ab8500_regulator[AB8500_VARM].vsel_range[0] = ab9540_varm_vsel;
+ ab8500_regulator[AB8500_VARM].vsel_range_len[0] =
+ ARRAY_SIZE(ab9540_varm_vsel);
+ ab8500_regulator[AB8500_VARM].vsel_mask[1] = AB9540_VARM_VSEL_MASK;
+ ab8500_regulator[AB8500_VARM].vsel_range[1] = ab9540_varm_vsel;
+ ab8500_regulator[AB8500_VARM].vsel_range_len[1] =
+ ARRAY_SIZE(ab9540_varm_vsel);
+ ab8500_regulator[AB8500_VARM].vsel_mask[2] = AB9540_VARM_VSEL_MASK;
+ ab8500_regulator[AB8500_VARM].vsel_range[2] = ab9540_varm_vsel;
+ ab8500_regulator[AB8500_VARM].vsel_range_len[2] =
+ ARRAY_SIZE(ab9540_varm_vsel);
+
+ ab8500_regulator[AB8500_VBBP].vsel_range[0] = ab9540_vbbp_vsel;
+ ab8500_regulator[AB8500_VBBP].vsel_range_len[0] =
+ ARRAY_SIZE(ab9540_vbbp_vsel);
+ ab8500_regulator[AB8500_VBBP].vsel_range[1] = ab9540_vbbp_vsel;
+ ab8500_regulator[AB8500_VBBP].vsel_range_len[1] =
+ ARRAY_SIZE(ab9540_vbbp_vsel);
+
+ ab8500_regulator[AB8500_VBBN].vsel_range[0] = ab9540_vbbn_vsel;
+ ab8500_regulator[AB8500_VBBN].vsel_range_len[0] =
+ ARRAY_SIZE(ab9540_vbbn_vsel);
+ ab8500_regulator[AB8500_VBBN].vsel_range[1] = ab9540_vbbn_vsel;
+ ab8500_regulator[AB8500_VBBN].vsel_range_len[1] =
+ ARRAY_SIZE(ab9540_vbbn_vsel);
+}
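+
+/*
+ * The AB9540 tables installed above reflect its finer granularity:
+ * Varm moves in 6.25 mV steps over a 7-bit field (vs 12.5 mV over
+ * 6 bits on the AB8500) and the body-bias rails step in 50 mV instead
+ * of 100 mV.
+ */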
+
+static int status_state = AB8500_REGULATOR_STATE_CURRENT;
+
+static int _get_voltage(struct regulator_volt_range const *volt_range,
+ u8 value, int *volt)
+{
+ u8 start = volt_range->start.value;
+ u8 end = volt_range->end.value;
+ u8 step = volt_range->step.value;
+
+ /* Check if within range */
+ if (step == 0) {
+ if (value == start) {
+ *volt = volt_range->start.volt;
+ return 1;
+ }
+ } else {
+ if ((start <= value) && (value <= end)) {
+ if ((value - start) % step != 0)
+ return -EINVAL; /* invalid setting */
+ *volt = volt_range->start.volt +
+ volt_range->step.volt *
+ ((value - start) / step);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int get_voltage(struct regulator_volt_range const *volt_range,
+ int volt_range_len,
+ u8 value)
+{
+ int volt;
+ int i, ret;
+
+ for (i = 0; i < volt_range_len; i++) {
+ ret = _get_voltage(&volt_range[i], value, &volt);
+ if (ret < 0)
+ break; /* invalid setting */
+ if (ret == 1)
+ return volt; /* successful */
+ }
+
+ return -EINVAL;
+}
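+
+/*
+ * Example: a Vaux1 code of 0x0b decodes through vauxn_vsel as
+ * 2650000 + ((0x0b - 0x09) / 0x01) * 50000 = 2750000 uV; a value
+ * matching no range makes get_voltage() return -EINVAL.
+ */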
+
+static bool get_reg_and_mask(int regid, u8 mask, u8 *val)
+{
+ int ret;
+ u8 t;
+
+ if (!regid)
+ return false;
+
+ ret = abx500_get_register_interruptible(dev,
+ ab8500_register[regid].bank,
+ ab8500_register[regid].addr,
+ &t);
+ if (ret < 0)
+ return false;
+
+ (*val) = t & mask;
+
+ return true;
+}
+
+/* Convert regulator register value to index */
+static bool val2idx(u8 val, u8 *v, int len, int *idx)
+{
+ int i;
+
+ for (i = 0; i < len && v[i] != val; i++)
+ ;
+
+ if (i == len)
+ return false;
+
+ (*idx) = i;
+ return true;
+}
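+
+/*
+ * Example: with update_val = {0x00, 0x01, 0x02, 0x03} and a masked
+ * register value of 0x02, val2idx() sets *idx = 2, i.e. hardware mode.
+ */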
+
+int ab8500_regulator_debug_read(enum ab8500_regulator_id id,
+ struct ab8500_debug_regulator_status *s)
+{
+ int i;
+ u8 val = 0;
+ bool found;
+ int idx = 0;
+
+ if (id >= AB8500_NUM_REGULATORS)
+ return -EINVAL;
+
+ s->name = (char *)ab8500_regulator[id].name;
+
+ /* read mode */
+ (void) get_reg_and_mask(ab8500_regulator[id].update_regid,
+ ab8500_regulator[id].update_mask,
+ &val);
+
+ (void) val2idx(val, ab8500_regulator[id].update_val,
+ 4, &idx);
+
+ s->mode = (u8) idx;
+
+ /* read hw mode */
+ found = get_reg_and_mask(ab8500_regulator[id].hw_mode_regid,
+ ab8500_regulator[id].hw_mode_mask,
+ &val);
+
+ if (found)
+ found = val2idx(val, ab8500_regulator[id].hw_mode_val, 4, &idx);
+
+ if (found)
+ /* +1 since 0 = HWMODE_NONE */
+ s->hwmode = idx + 1;
+ else
+ s->hwmode = AB8500_HWMODE_NONE;
+
+ for (i = 0; i < 4 && found; i++) {
+
+ bool f = get_reg_and_mask(ab8500_regulator[id].hw_valid_regid[i],
+ ab8500_regulator[id].hw_valid_mask[i],
+ &val);
+ if (f)
+ s->hwmode_auto[i] = !!val;
+ else
+ s->hwmode_auto[i] = HWM_INVAL;
+ }
+
+ /* read voltage */
+ found = get_reg_and_mask(ab8500_regulator[id].vsel_sel_regid,
+ ab8500_regulator[id].vsel_sel_mask,
+ &val);
+ if (found)
+ found = val2idx(val, ab8500_regulator[id].vsel_sel_val,
+ 3, &idx);
+
+ if (found && idx < 3)
+ s->volt_selected = idx + 1;
+ else
+ s->volt_selected = 0;
+
+ for (s->volt_len = 0; s->volt_len < 3; s->volt_len++) {
+ int volt;
+ int i = s->volt_len;
+
+ found = get_reg_and_mask(ab8500_regulator[id].vsel_regid[i],
+ ab8500_regulator[id].vsel_mask[i],
+ &val);
+ if (!found)
+ break;
+
+ volt = get_voltage(ab8500_regulator[id].vsel_range[i],
+ ab8500_regulator[id].vsel_range_len[i],
+ val);
+ s->volt[i] = volt;
+ }
+ return 0;
+}
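+
+/*
+ * Illustrative use (hypothetical caller, not part of this driver):
+ * struct ab8500_debug_regulator_status st;
+ * if (!ab8500_regulator_debug_read(AB8500_VAUX1, &st))
+ * pr_info("%s: mode %d, %d uV\n", st.name, st.mode, st.volt[0]);
+ */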
+
+static int ab8500_regulator_status_print(struct seq_file *s, void *p)
+{
+ struct device *dev = s->private;
+ int id, regid;
+ int i;
+ u8 val;
+ int err;
+
+ /* record current state */
+ ab8500_regulator_record_state(AB8500_REGULATOR_STATE_CURRENT);
+
+ /* check if chosen state is recorded */
+ if (!ab8500_register_state_saved[status_state]) {
+ seq_printf(s, "ab8500-regulator status is not recorded.\n");
+ goto exit;
+ }
+
+ /* print dump header */
+ err = seq_printf(s, "ab8500-regulator status:\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+
+ /* print state */
+ for (i = 0; i < NUM_REGULATOR_STATE; i++) {
+ if (i == status_state)
+ err = seq_printf(s, "-> %i. %12s\n",
+ i, regulator_state_name[i]);
+ else
+ err = seq_printf(s, " %i. %12s\n",
+ i, regulator_state_name[i]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow\n");
+ }
+
+ /* print labels */
+ err = seq_printf(s,
+ "+-----------+----+--------------+-------------------------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "|       name|man |auto          |voltage                  |\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "+-----------+----+--------------+ +-----------------------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "|           |mode|mode  |0|1|2|3| |   1   |   2   |   3   |\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "+-----------+----+------+-+-+-+-+-+-------+-------+-------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+ /* dump registers */
+ for (id = 0; id < AB8500_NUM_REGULATORS; id++) {
+ if (ab8500_regulator[id].unavailable ||
+ ab8500_register[ab8500_regulator[id].update_regid].unavailable)
+ continue;
+
+ /* print name */
+ err = seq_printf(s, "|%11s|",
+ ab8500_regulator[id].name);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ id, __LINE__);
+
+ /* print manual mode */
+ regid = ab8500_regulator[id].update_regid;
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].update_mask;
+ for (i = 0; i < 4; i++) {
+ if (val == ab8500_regulator[id].update_val[i])
+ break;
+ }
+ err = seq_printf(s, "%4s|",
+ update_val_name[i]);
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ id, __LINE__);
+
+ /* print auto mode */
+ regid = ab8500_regulator[id].hw_mode_regid;
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].hw_mode_mask;
+ for (i = 0; i < 4; i++) {
+ if (val == ab8500_regulator[id].hw_mode_val[i])
+ break;
+ }
+ err = seq_printf(s, "%6s|",
+ hw_mode_val_name[i]);
+ } else {
+ err = seq_printf(s, "      |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ id, __LINE__);
+
+ /* print valid bits */
+ for (i = 0; i < 4; i++) {
+ regid = ab8500_regulator[id].hw_valid_regid[i];
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].hw_valid_mask[i];
+ if (val)
+ err = seq_printf(s, "1|");
+ else
+ err = seq_printf(s, "0|");
+ } else {
+ err = seq_printf(s, " |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+ }
+
+ /* print voltage selection */
+ regid = ab8500_regulator[id].vsel_sel_regid;
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].vsel_sel_mask;
+ for (i = 0; i < 3; i++) {
+ if (val == ab8500_regulator[id].vsel_sel_val[i])
+ break;
+ }
+ if (i < 3)
+ err = seq_printf(s, "%i|", i + 1);
+ else
+ err = seq_printf(s, "-|");
+ } else {
+ err = seq_printf(s, " |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+
+ for (i = 0; i < 3; i++) {
+ int volt;
+
+ regid = ab8500_regulator[id].vsel_regid[i];
+ if (regid) {
+ val = ab8500_register_state[status_state][regid]
+ & ab8500_regulator[id].vsel_mask[i];
+ volt = get_voltage(
+ ab8500_regulator[id].vsel_range[i],
+ ab8500_regulator[id].vsel_range_len[i],
+ val);
+ err = seq_printf(s, "%7i|", volt);
+ } else {
+ err = seq_printf(s, "       |");
+ }
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+ }
+
+ err = seq_printf(s, "\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i, %i\n",
+ regid, __LINE__);
+
+ }
+ err = seq_printf(s,
+ "+-----------+----+------+-+-+-+-+-+-------+-------+-------+\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+ err = seq_printf(s,
+ "Note! In HW mode, voltage selection is controlled by HW.\n");
+ if (err < 0)
+ dev_err(dev, "seq_printf overflow: %i\n", __LINE__);
+
+
+exit:
+ return 0;
+}
+
+static ssize_t ab8500_regulator_status_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+
+ /* copy user data */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* convert */
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+
+ /* set state to display */
+ if (user_val >= NUM_REGULATOR_STATE) {
+ dev_err(dev, "debugfs error input >= number of states\n");
+ return -EINVAL;
+ }
+
+ status_state = user_val;
+
+ return buf_size;
+}
+
+
+static int ab8500_regulator_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab8500_regulator_status_print,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_regulator_status_fops = {
+ .open = ab8500_regulator_status_open,
+ .write = ab8500_regulator_status_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
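+
+/*
+ * Usage sketch from user space (assuming debugfs is mounted at the
+ * usual /sys/kernel/debug):
+ * echo 1 > /sys/kernel/debug/ab8500-regulator/status
+ * cat /sys/kernel/debug/ab8500-regulator/status
+ */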
+
+#ifdef CONFIG_PM
+
+struct ab8500_force_reg {
+ char *name;
+ u8 bank;
+ u8 addr;
+ u8 mask;
+ u8 val;
+ bool restore;
+ u8 restore_val;
+ u8 unavailable;
+};
+
+static struct ab8500_force_reg ab8500_force_reg[] = {
+ {
+ /*
+ * SysClkCtrl
+ * OTP: 0x00, HSI: 0x06, suspend: 0x00/0x07 (value/mask)
+ * [ 2] USBClkEna = disable SysClk path to USB block
+ * [ 1] TVoutClkEna = disable 27 MHz clock to TVout block
+ * [ 0] TVoutPllEna = disable TVout PLL
+ * (generates 27 MHz from SysClk)
+ */
+ .name = "SysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x0c,
+ .mask = 0x07,
+ .val = 0x00,
+ },
+ {
+ /*
+ * VsimSysClkCtrl
+ * OTP: 0x01, HSI: 0x21, suspend: 0x01/0xff (value/mask)
+ * [ 7] VsimSysClkReq8Valid = no connection
+ * [ 6] VsimSysClkReq7Valid = no connection
+ * [ 5] VsimSysClkReq6Valid = no connection
+ * [ 4] VsimSysClkReq5Valid = no connection
+ * [ 3] VsimSysClkReq4Valid = no connection
+ * [ 2] VsimSysClkReq3Valid = no connection
+ * [ 1] VsimSysClkReq2Valid = no connection
+ * [ 0] VsimSysClkReq1Valid = Vsim set by SysClkReq1
+ */
+ .name = "VsimSysClkCtrl",
+ .bank = 0x02,
+ .addr = 0x33,
+ .mask = 0xff,
+ .val = 0x01,
+ },
+ {
+ /*
+ * SysUlpClkCtrl1
+ * OTP: 0x00, HSI: 0x00, suspend: 0x00/0x0f (value/mask)
+ * [ 3] 4500SysClkReq = inactive
+ * [ 2] UlpClkReq = inactive
+ * [1:0] SysUlpClkIntSel[1:0] = no internal clock switching.
+ * Internal clock is SysClk.
+ */
+ .name = "SysUlpClkCtrl1",
+ .bank = 0x02,
+ .addr = 0x0b,
+ .mask = 0x0f,
+ .val = 0x00,
+ },
+ {
+ /*
+ * TVoutCtrl
+ * OTP: N/A, HSI: N/A, suspend: 0x00/0x03 (value/mask)
+ * [ 2] PlugTvOn = plug/unplug detection disabled
+ * [1:0] TvoutDacCtrl[1:0] = "0" forced on DAC input (test)
+ */
+ .name = "TVoutCtrl",
+ .bank = 0x06,
+ .addr = 0x80,
+ .mask = 0x03,
+ .val = 0x00,
+ },
+};
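+
+/*
+ * Suspend flow sketch: ab8500_regulator_debug_force() below first saves
+ * the current value of each entry above in restore_val, then applies
+ * val under mask; ab8500_regulator_debug_restore() walks the array in
+ * reverse and writes restore_val back.
+ */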
+
+static void ab9540_force_reg_update(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_force_reg); i++) {
+ if (ab8500_force_reg[i].bank == 0x02 &&
+ ab8500_force_reg[i].addr == 0x0c) {
+ /*
+ * SysClkCtrl
+ * OTP: 0x00, HSI: 0x06, suspend: 0x00/0x07 (value/mask)
+ * [ 2] USBClkEna = disable SysClk path to USB block
+ */
+ ab8500_force_reg[i].mask = 0x04;
+ ab8500_force_reg[i].val = 0x00;
+ } else if (ab8500_force_reg[i].bank == 0x06 &&
+ ab8500_force_reg[i].addr == 0x80) {
+ /* TVoutCtrl not supported by AB9540 */
+ ab8500_force_reg[i].unavailable = true;
+ }
+ }
+}
+
+void ab8500_regulator_debug_force(void)
+{
+ int ret, i;
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_SUSPEND);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record suspend state.\n");
+
+ /* check if registers should be forced */
+ if (!setting_suspend_force)
+ goto exit;
+
+ /*
+ * Optimize href v2_v50_pwr board for ApSleep/ApDeepSleep
+ * power consumption measurements
+ */
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_force_reg); i++) {
+ if (ab8500_force_reg[i].unavailable)
+ continue;
+
+ dev_vdbg(&pdev->dev, "Save and set %s: "
+ "0x%02x, 0x%02x, 0x%02x, 0x%02x.\n",
+ ab8500_force_reg[i].name,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].val);
+
+ /* assume that register should be restored */
+ ab8500_force_reg[i].restore = true;
+
+ /* get register value before forcing it */
+ ret = abx500_get_register_interruptible(&pdev->dev,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ &ab8500_force_reg[i].restore_val);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read %s.\n",
+ ab8500_force_reg[i].name);
+ ab8500_force_reg[i].restore = false;
+ break;
+ }
+
+ /* force register value */
+ ret = abx500_mask_and_set_register_interruptible(&pdev->dev,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].val);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to write %s.\n",
+ ab8500_force_reg[i].name);
+ ab8500_force_reg[i].restore = false;
+ }
+ }
+
+exit:
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(
+ AB8500_REGULATOR_STATE_SUSPEND_CORE);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record suspend state.\n");
+
+ return;
+}
+
+void ab8500_regulator_debug_restore(void)
+{
+ int ret, i;
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_RESUME_CORE);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record resume state.\n");
+ for (i = ARRAY_SIZE(ab8500_force_reg) - 1; i >= 0; i--) {
+ if (ab8500_force_reg[i].unavailable)
+ continue;
+
+ /* restore register value */
+ if (ab8500_force_reg[i].restore) {
+ ret = abx500_mask_and_set_register_interruptible(
+ &pdev->dev,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].restore_val);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to restore %s.\n",
+ ab8500_force_reg[i].name);
+ dev_vdbg(&pdev->dev, "Restore %s: "
+ "0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ ab8500_force_reg[i].name,
+ ab8500_force_reg[i].bank,
+ ab8500_force_reg[i].addr,
+ ab8500_force_reg[i].mask,
+ ab8500_force_reg[i].restore_val);
+ }
+ }
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_RESUME);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to record resume state.\n");
+
+ return;
+}
+
+#endif
+
+static int ab8500_regulator_suspend_force_show(struct seq_file *s, void *p)
+{
+ /* print suspend standby status */
+ if (setting_suspend_force)
+ return seq_printf(s, "suspend force enabled\n");
+ else
+ return seq_printf(s, "no suspend force\n");
+}
+
+static ssize_t ab8500_regulator_suspend_force_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+
+ /* copy user data */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /* convert */
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+
+ /* set suspend force setting */
+ if (user_val > 1) {
+ dev_err(dev, "debugfs error input > 1\n");
+ return -EINVAL;
+ }
+
+ if (user_val)
+ setting_suspend_force = true;
+ else
+ setting_suspend_force = false;
+
+ return buf_size;
+}
+
+static int ab8500_regulator_suspend_force_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ab8500_regulator_suspend_force_show,
+ inode->i_private);
+}
+
+static const struct file_operations ab8500_regulator_suspend_force_fops = {
+ .open = ab8500_regulator_suspend_force_open,
+ .write = ab8500_regulator_suspend_force_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static struct dentry *ab8500_regulator_dir;
+static struct dentry *ab8500_regulator_dump_file;
+static struct dentry *ab8500_regulator_status_file;
+static struct dentry *ab8500_regulator_suspend_force_file;
+
+int __devinit ab8500_regulator_debug_init(struct platform_device *plf)
+{
+ void __iomem *boot_info_backupram;
+ int ret;
+ struct ab8500 *ab8500;
+
+ /* setup dev pointers */
+ dev = &plf->dev;
+ pdev = plf;
+
+ /* save state of registers */
+ ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_INIT);
+ if (ret < 0)
+ dev_err(&plf->dev, "Failed to record init state.\n");
+
+ ab8500 = dev_get_drvdata(plf->dev.parent);
+ /* Update data structures for AB9540 */
+ if (is_ab9540(ab8500)) {
+ ab9540_registers_update();
+ ab9540_regulators_update();
+ ab9540_force_reg_update();
+ }
+ /* make suspend-force default if board profile is v5x-power */
+ boot_info_backupram = ioremap(BOOT_INFO_BACKUPRAM1, 0x4);
+
+ if (boot_info_backupram) {
+ u8 board_profile;
+ board_profile = readb(
+ boot_info_backupram + BOARD_PROFILE_BACKUPRAM1);
+ dev_dbg(dev, "Board profile is 0x%02x\n", board_profile);
+
+ if (board_profile >= OPTION_BOARD_VERSION_V5X)
+ setting_suspend_force = true;
+
+ iounmap(boot_info_backupram);
+ } else {
+ dev_err(dev, "Failed to read backupram.\n");
+ }
+
+ /* create directory */
+ ab8500_regulator_dir = debugfs_create_dir("ab8500-regulator", NULL);
+ if (!ab8500_regulator_dir)
+ goto exit_no_debugfs;
+
+ /* create "dump" file */
+ ab8500_regulator_dump_file = debugfs_create_file("dump",
+ S_IRUGO, ab8500_regulator_dir, &plf->dev,
+ &ab8500_regulator_dump_fops);
+ if (!ab8500_regulator_dump_file)
+ goto exit_destroy_dir;
+
+ /* create "status" file */
+ ab8500_regulator_status_file = debugfs_create_file("status",
+ S_IRUGO, ab8500_regulator_dir, &plf->dev,
+ &ab8500_regulator_status_fops);
+ if (!ab8500_regulator_status_file)
+ goto exit_destroy_dump_file;
+
+ /*
+ * create "suspend-force-v5x" file. As indicated by the name, this is
+ * only applicable for v2_v5x hardware versions.
+ */
+ ab8500_regulator_suspend_force_file = debugfs_create_file(
+ "suspend-force-v5x",
+ S_IRUGO, ab8500_regulator_dir, &plf->dev,
+ &ab8500_regulator_suspend_force_fops);
+ if (!ab8500_regulator_suspend_force_file)
+ goto exit_destroy_status_file;
+
+ return 0;
+
+exit_destroy_status_file:
+ debugfs_remove(ab8500_regulator_status_file);
+exit_destroy_dump_file:
+ debugfs_remove(ab8500_regulator_dump_file);
+exit_destroy_dir:
+ debugfs_remove(ab8500_regulator_dir);
+exit_no_debugfs:
+ dev_err(&plf->dev, "failed to create debugfs entries.\n");
+ return -ENOMEM;
+}
+
+int __devexit ab8500_regulator_debug_exit(struct platform_device *plf)
+{
+ debugfs_remove(ab8500_regulator_suspend_force_file);
+ debugfs_remove(ab8500_regulator_status_file);
+ debugfs_remove(ab8500_regulator_dump_file);
+ debugfs_remove(ab8500_regulator_dir);
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>");
+MODULE_DESCRIPTION("AB8500 Regulator Debug");
+MODULE_ALIAS("platform:ab8500-regulator-debug");
diff --git a/drivers/regulator/ab8500-debug.h b/drivers/regulator/ab8500-debug.h
new file mode 100644
index 00000000000..2b59e556a3f
--- /dev/null
+++ b/drivers/regulator/ab8500-debug.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson.
+ *
+ * License Terms: GNU General Public License v2
+ */
+
+#ifndef __AB8500_DEBUG_H__
+#define __AB8500_DEBUG_H__
+
+/*
+ * regulator status print
+ */
+enum ab8500_regulator_id {
+ AB8500_VARM,
+ AB8500_VBBP,
+ AB8500_VBBN,
+ AB8500_VAPE,
+ AB8500_VSMPS1,
+ AB8500_VSMPS2,
+ AB8500_VSMPS3,
+ AB8500_VPLL,
+ AB8500_VREFDDR,
+ AB8500_VMOD,
+ AB8500_VEXTSUPPLY1,
+ AB8500_VEXTSUPPLY2,
+ AB8500_VEXTSUPPLY3,
+ AB8500_VRF1,
+ AB8500_VANA,
+ AB8500_VAUX1,
+ AB8500_VAUX2,
+ AB8500_VAUX3,
+ AB9540_VAUX4, /* Note: AB9540 only */
+ AB8500_VINTCORE,
+ AB8500_VTVOUT,
+ AB8500_VAUDIO,
+ AB8500_VANAMIC1,
+ AB8500_VANAMIC2,
+ AB8500_VDMIC,
+ AB8500_VUSB,
+ AB8500_VOTG,
+ AB8500_VBUSBIS,
+ AB8500_NUM_REGULATORS,
+};
+
+enum ab8500_regulator_mode {
+ AB8500_MODE_OFF = 0,
+ AB8500_MODE_ON,
+ AB8500_MODE_HW,
+ AB8500_MODE_LP
+};
+
+enum ab8500_regulator_hwmode {
+ AB8500_HWMODE_NONE = 0,
+ AB8500_HWMODE_HPLP,
+ AB8500_HWMODE_HPOFF,
+ AB8500_HWMODE_HP,
+ AB8500_HWMODE_HP2,
+};
+
+enum hwmode_auto {
+ HWM_OFF = 0,
+ HWM_ON = 1,
+ HWM_INVAL = 2,
+};
+
+struct ab8500_debug_regulator_status {
+ char *name;
+ enum ab8500_regulator_mode mode;
+ enum ab8500_regulator_hwmode hwmode;
+ enum hwmode_auto hwmode_auto[4];
+ int volt_selected;
+ int volt_len;
+ int volt[4];
+};
+
+int ab8500_regulator_debug_read(enum ab8500_regulator_id id,
+ struct ab8500_debug_regulator_status *s);
+#endif /* __AB8500_DEBUG_H__ */
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
new file mode 100644
index 00000000000..8a5064c07fb
--- /dev/null
+++ b/drivers/regulator/ab8500-ext.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Authors: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ *
+ * This file is based on drivers/regulator/ab8500.c
+ *
+ * AB8500 external regulators
+ *
+ * ab8500-ext supports the following regulators:
+ * - VextSupply3
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/regulator/ab8500.h>
+
+/**
+ * struct ab8500_ext_regulator_info - ab8500 regulator information
+ * @dev: device pointer
+ * @desc: regulator description
+ * @rdev: regulator device
+ * @cfg: regulator configuration (extension of regulator FW configuration)
+ * @is_enabled: status of regulator (on/off)
+ * @fixed_uV: typical voltage (for fixed voltage supplies)
+ * @update_bank: bank to control on/off
+ * @update_reg: register to control on/off
+ * @update_mask: mask to enable/disable and set mode of regulator
+ * @update_val: bits holding the regulator current mode
+ * @update_val_hp: bits to set EN pin active (LPn pin inactive)
+ * normally this means high power mode
+ * @update_val_lp: bits to set EN pin active and LPn pin active
+ * normally this means low power mode
+ * @update_val_hw: bits to set regulator pins in HW control
+ * SysClkReq pins and logic will choose mode
+ */
+struct ab8500_ext_regulator_info {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct regulator_dev *rdev;
+ struct ab8500_ext_regulator_cfg *cfg;
+ bool is_enabled;
+ int fixed_uV;
+ u8 update_bank;
+ u8 update_reg;
+ u8 update_mask;
+ u8 update_val;
+ u8 update_val_hp;
+ u8 update_val_lp;
+ u8 update_val_hw;
+};
+
+static int enable(struct ab8500_ext_regulator_info *info, u8 *regval)
+{
+ int ret;
+
+ *regval = info->update_val;
+
+ /*
+ * To satisfy both HW high power request and SW request, the regulator
+ * must be on in high power.
+ */
+ if (info->cfg && info->cfg->hwreq)
+ *regval = info->update_val_hp;
+
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, *regval);
+ if (ret < 0)
+ dev_err(rdev_get_dev(info->rdev),
+ "couldn't set enable bits for regulator\n");
+
+ info->is_enabled = true;
+
+ return ret;
+}
+
+static int ab8500_ext_regulator_enable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = enable(info, &regval);
+
+ dev_dbg(rdev_get_dev(rdev), "%s-enable (bank, reg, mask, value):"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+
+ return ret;
+}
+
+static int ab8500_ext_regulator_set_suspend_enable(struct regulator_dev *rdev)
+{
+ dev_dbg(rdev_get_dev(rdev), "suspend: ");
+
+ return ab8500_ext_regulator_enable(rdev);
+}
+
+static int disable(struct ab8500_ext_regulator_info *info, u8 *regval)
+{
+ int ret;
+
+ *regval = 0x0;
+
+ /*
+ * Set the regulator in HW request mode if configured
+ */
+ if (info->cfg && info->cfg->hwreq)
+ *regval = info->update_val_hw;
+
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, *regval);
+ if (ret < 0)
+ dev_err(rdev_get_dev(info->rdev),
+ "couldn't set disable bits for regulator\n");
+
+ info->is_enabled = false;
+
+ return ret;
+}
+
+static int ab8500_ext_regulator_disable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = disable(info, &regval);
+
+ dev_dbg(rdev_get_dev(rdev), "%s-disable (bank, reg, mask, value):"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+
+ return ret;
+}
+
+static int ab8500_ext_regulator_set_suspend_disable(struct regulator_dev *rdev)
+{
+ dev_dbg(rdev_get_dev(rdev), "suspend: ");
+
+ return ab8500_ext_regulator_disable(rdev);
+}
+
+static int ab8500_ext_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = abx500_get_register_interruptible(info->dev,
+ info->update_bank, info->update_reg, &regval);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't read 0x%x register\n", info->update_reg);
+ return ret;
+ }
+
+ dev_dbg(rdev_get_dev(rdev), "%s-is_enabled (bank, reg, mask, value):"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+
+ if (((regval & info->update_mask) == info->update_val_lp) ||
+ ((regval & info->update_mask) == info->update_val_hp))
+ info->is_enabled = true;
+ else
+ info->is_enabled = false;
+
+ return info->is_enabled;
+}
+
+static int ab8500_ext_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ int ret = 0;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ info->update_val = info->update_val_hp;
+ break;
+ case REGULATOR_MODE_IDLE:
+ info->update_val = info->update_val_lp;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (info->is_enabled) {
+ u8 regval;
+
+ ret = enable(info, &regval);
+ if (ret < 0)
+ dev_err(rdev_get_dev(rdev),
+ "Could not set regulator mode.\n");
+
+ dev_dbg(rdev_get_dev(rdev),
+ "%s-set_mode (bank, reg, mask, value): "
+ "0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+ }
+
+ return ret;
+}
+
+static unsigned int ab8500_ext_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (info->update_val == info->update_val_hp)
+ ret = REGULATOR_MODE_NORMAL;
+ else if (info->update_val == info->update_val_lp)
+ ret = REGULATOR_MODE_IDLE;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int ab8500_ext_fixed_get_voltage(struct regulator_dev *rdev)
+{
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ return info->fixed_uV;
+}
+
+static int ab8500_ext_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ /* return the uV for the fixed regulators */
+ if (info->fixed_uV)
+ return info->fixed_uV;
+
+ return -EINVAL;
+}
+
+static struct regulator_ops ab8500_ext_regulator_ops = {
+ .enable = ab8500_ext_regulator_enable,
+ .set_suspend_enable = ab8500_ext_regulator_set_suspend_enable,
+ .disable = ab8500_ext_regulator_disable,
+ .set_suspend_disable = ab8500_ext_regulator_set_suspend_disable,
+ .is_enabled = ab8500_ext_regulator_is_enabled,
+ .set_mode = ab8500_ext_regulator_set_mode,
+ .get_mode = ab8500_ext_regulator_get_mode,
+ .get_voltage = ab8500_ext_fixed_get_voltage,
+ .list_voltage = ab8500_ext_list_voltage,
+};
+
+static struct ab8500_ext_regulator_info
+ ab8500_ext_regulator_info[AB8500_NUM_EXT_REGULATORS] = {
+ [AB8500_EXT_SUPPLY1] = {
+ .desc = {
+ .name = "VEXTSUPPLY1",
+ .ops = &ab8500_ext_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_EXT_SUPPLY1,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1800000,
+ .update_bank = 0x04,
+ .update_reg = 0x08,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_hp = 0x01,
+ .update_val_lp = 0x03,
+ .update_val_hw = 0x02,
+ },
+ [AB8500_EXT_SUPPLY2] = {
+ .desc = {
+ .name = "VEXTSUPPLY2",
+ .ops = &ab8500_ext_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_EXT_SUPPLY2,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1360000,
+ .update_bank = 0x04,
+ .update_reg = 0x08,
+ .update_mask = 0x0c,
+ .update_val = 0x04,
+ .update_val_hp = 0x04,
+ .update_val_lp = 0x0c,
+ .update_val_hw = 0x08,
+ },
+ [AB8500_EXT_SUPPLY3] = {
+ .desc = {
+ .name = "VEXTSUPPLY3",
+ .ops = &ab8500_ext_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_EXT_SUPPLY3,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 3400000,
+ .update_bank = 0x04,
+ .update_reg = 0x08,
+ .update_mask = 0x30,
+ .update_val = 0x10,
+ .update_val_hp = 0x10,
+ .update_val_lp = 0x30,
+ .update_val_hw = 0x20,
+ },
+};
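
For reference, HW request mode (the info->cfg->hwreq path in disable() above) is opted into per board through the driver_data pointer of the matching init data, which ab8500_ext_regulator_init() below casts to struct ab8500_ext_regulator_cfg. A minimal board-file sketch, assuming only the hwreq flag visible in this patch (variable names are illustrative, not part of the patch):

	static struct ab8500_ext_regulator_cfg ext_supply1_cfg = {
		.hwreq = true,	/* keep supply under HW request when SW-disabled */
	};

	static struct regulator_init_data ext_supply1_init = {
		.constraints = {
			.name = "vext-supply1",
		},
		.driver_data = &ext_supply1_cfg,	/* hypothetical hookup */
	};
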
+
+__devinit int ab8500_ext_regulator_init(struct platform_device *pdev)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
+ struct ab8500_platform_data *ppdata;
+ struct ab8500_regulator_platform_data *pdata;
+ int i, err;
+
+ if (!ab8500) {
+ dev_err(&pdev->dev, "null mfd parent\n");
+ return -EINVAL;
+ }
+ ppdata = dev_get_platdata(ab8500->dev);
+ if (!ppdata) {
+ dev_err(&pdev->dev, "null parent pdata\n");
+ return -EINVAL;
+ }
+
+ pdata = ppdata->regulator;
+ if (!pdata) {
+ dev_err(&pdev->dev, "null pdata\n");
+ return -EINVAL;
+ }
+
+ /* make sure the platform data has the correct size */
+ if (pdata->num_ext_regulator != ARRAY_SIZE(ab8500_ext_regulator_info)) {
+ dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
+ return -EINVAL;
+ }
+
+ /* check for AB8500 2.x */
+ if (is_ab8500_2p0_or_earlier(ab8500)) {
+ struct ab8500_ext_regulator_info *info;
+
+ /* VextSupply3LPn is inverted on AB8500 2.x */
+ info = &ab8500_ext_regulator_info[AB8500_EXT_SUPPLY3];
+ info->update_val = 0x30;
+ info->update_val_hp = 0x30;
+ info->update_val_lp = 0x10;
+ }
+
+ /* register all regulators */
+ for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) {
+ struct ab8500_ext_regulator_info *info = NULL;
+
+ /* assign per-regulator data */
+ info = &ab8500_ext_regulator_info[i];
+ info->dev = &pdev->dev;
+ info->cfg = (struct ab8500_ext_regulator_cfg *)
+ pdata->ext_regulator[i].driver_data;
+
+ /* register regulator with framework */
+ info->rdev = regulator_register(&info->desc, &pdev->dev,
+ &pdata->ext_regulator[i], info, NULL);
+ if (IS_ERR(info->rdev)) {
+ err = PTR_ERR(info->rdev);
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ info->desc.name);
+ /* when we fail, un-register all earlier regulators */
+ while (--i >= 0) {
+ info = &ab8500_ext_regulator_info[i];
+ regulator_unregister(info->rdev);
+ }
+ return err;
+ }
+
+ dev_dbg(rdev_get_dev(info->rdev),
+ "%s-probed\n", info->desc.name);
+ }
+
+ return 0;
+}
+
+__devexit int ab8500_ext_regulator_exit(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) {
+ struct ab8500_ext_regulator_info *info = NULL;
+ info = &ab8500_ext_regulator_info[i];
+
+ dev_vdbg(rdev_get_dev(info->rdev),
+ "%s-remove\n", info->desc.name);
+
+ regulator_unregister(info->rdev);
+ }
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>");
+MODULE_DESCRIPTION("AB8500 external regulator driver");
+MODULE_ALIAS("platform:ab8500-ext-regulator");
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index c7ee4c15d6f..71328249659 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -21,43 +21,55 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
+#include <linux/mfd/abx500/ab8500-gpio.h> /* for sysclkreq pins */
+#include <mach/id.h>
/**
* struct ab8500_regulator_info - ab8500 regulator information
* @dev: device pointer
* @desc: regulator description
* @regulator_dev: regulator device
+ * @is_enabled: status of regulator (on/off)
* @max_uV: maximum voltage (for variable voltage supplies)
* @min_uV: minimum voltage (for variable voltage supplies)
* @fixed_uV: typical voltage (for fixed voltage supplies)
+ * @load_lp_uA: maximum load in idle (low power) mode
* @update_bank: bank to control on/off
* @update_reg: register to control on/off
- * @update_mask: mask to enable/disable regulator
- * @update_val_enable: bits to enable the regulator in normal (high power) mode
+ * @update_mask: mask to enable/disable and set mode of regulator
+ * @update_val: bits holding the regulator current mode
+ * @update_val_idle: bits to enable the regulator in idle (low power) mode
+ * @update_val_normal: bits to enable the regulator in normal (high power) mode
* @voltage_bank: bank to control regulator voltage
* @voltage_reg: register to control regulator voltage
* @voltage_mask: mask to control regulator voltage
* @voltages: supported voltage table
* @voltages_len: number of supported voltages for the regulator
* @delay: startup/set voltage delay in us
+ * @gpio_pin: ab8500 gpio pin offset number (for sysclkreq regulator only)
*/
struct ab8500_regulator_info {
struct device *dev;
struct regulator_desc desc;
struct regulator_dev *regulator;
+ bool is_enabled;
int max_uV;
int min_uV;
int fixed_uV;
+ int load_lp_uA;
u8 update_bank;
u8 update_reg;
u8 update_mask;
- u8 update_val_enable;
+ u8 update_val;
+ u8 update_val_idle;
+ u8 update_val_normal;
u8 voltage_bank;
u8 voltage_reg;
u8 voltage_mask;
int const *voltages;
int voltages_len;
unsigned int delay;
+ enum ab8500_pin gpio_pin;
};
/* voltage tables for the vauxn/vintcore supplies */
@@ -113,15 +125,17 @@ static int ab8500_regulator_enable(struct regulator_dev *rdev)
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->update_bank, info->update_reg,
- info->update_mask, info->update_val_enable);
+ info->update_mask, info->update_val);
if (ret < 0)
dev_err(rdev_get_dev(rdev),
"couldn't set enable bits for regulator\n");
+ info->is_enabled = true;
+
dev_vdbg(rdev_get_dev(rdev),
"%s-enable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, info->update_bank, info->update_reg,
- info->update_mask, info->update_val_enable);
+ info->update_mask, info->update_val);
return ret;
}
@@ -143,6 +157,8 @@ static int ab8500_regulator_disable(struct regulator_dev *rdev)
dev_err(rdev_get_dev(rdev),
"couldn't set disable bits for regulator\n");
+ info->is_enabled = false;
+
dev_vdbg(rdev_get_dev(rdev),
"%s-disable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, info->update_bank, info->update_reg,
@@ -151,6 +167,88 @@ static int ab8500_regulator_disable(struct regulator_dev *rdev)
return ret;
}
+static unsigned int ab8500_regulator_get_optimum_mode(
+ struct regulator_dev *rdev, int input_uV,
+ int output_uV, int load_uA)
+{
+ unsigned int mode;
+
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (load_uA <= info->load_lp_uA)
+ mode = REGULATOR_MODE_IDLE;
+ else
+ mode = REGULATOR_MODE_NORMAL;
+
+ return mode;
+}
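
Consumers do not call this op directly; the regulator core invokes it from regulator_set_optimum_mode() and then applies the returned mode through set_mode(). A minimal consumer sketch, assuming a supply named "v-aux1" (the name is illustrative):

	struct regulator *reg;
	int ret;

	reg = regulator_get(dev, "v-aux1");
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	ret = regulator_enable(reg);
	if (ret)
		return ret;

	/* loads at or below load_lp_uA (5000 uA for the aux LDOs) give IDLE */
	regulator_set_optimum_mode(reg, 2000);
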
+
+static int ab8500_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ int ret = 0;
+
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ info->update_val = info->update_val_normal;
+ break;
+ case REGULATOR_MODE_IDLE:
+ info->update_val = info->update_val_idle;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (info->is_enabled) {
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, info->update_val);
+ if (ret < 0)
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set regulator mode\n");
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-set_mode (bank, reg, mask, value): "
+ "0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, info->update_val);
+ }
+
+ return ret;
+}
+
+static unsigned int ab8500_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (info->update_val == info->update_val_normal)
+ ret = REGULATOR_MODE_NORMAL;
+ else if (info->update_val == info->update_val_idle)
+ ret = REGULATOR_MODE_IDLE;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
{
int ret;
@@ -177,9 +275,11 @@ static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
info->update_mask, regval);
if (regval & info->update_mask)
- return true;
+ info->is_enabled = true;
else
- return false;
+ info->is_enabled = false;
+
+ return info->is_enabled;
}
static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
@@ -273,8 +373,13 @@ static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
*selector = ret;
+ /* vintcore register has a different layout */
+ if (info->desc.id == AB8500_LDO_INTCORE)
+ regval = ((u8)ret) << 3;
+ else
+ regval = (u8)ret;
+
/* set the registers for the request */
- regval = (u8)ret;
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->voltage_bank, info->voltage_reg,
info->voltage_mask, regval);
@@ -314,9 +419,12 @@ static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
return info->delay;
}
-static struct regulator_ops ab8500_regulator_ops = {
+static struct regulator_ops ab8500_regulator_volt_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
+ .get_optimum_mode = ab8500_regulator_get_optimum_mode,
+ .set_mode = ab8500_regulator_set_mode,
+ .get_mode = ab8500_regulator_get_mode,
.is_enabled = ab8500_regulator_is_enabled,
.get_voltage_sel = ab8500_regulator_get_voltage_sel,
.set_voltage = ab8500_regulator_set_voltage,
@@ -337,16 +445,116 @@ static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
return info->fixed_uV;
}
-static struct regulator_ops ab8500_regulator_fixed_ops = {
+static struct regulator_ops ab8500_regulator_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
+ .get_optimum_mode = ab8500_regulator_get_optimum_mode,
+ .set_mode = ab8500_regulator_set_mode,
+ .get_mode = ab8500_regulator_get_mode,
.get_voltage = ab8500_fixed_get_voltage,
.list_voltage = ab8500_list_voltage,
.enable_time = ab8500_regulator_enable_time,
.set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
+static struct regulator_ops ab8500_regulator_ops = {
+ .enable = ab8500_regulator_enable,
+ .disable = ab8500_regulator_disable,
+ .is_enabled = ab8500_regulator_is_enabled,
+ .get_voltage = ab8500_fixed_get_voltage,
+ .list_voltage = ab8500_list_voltage,
+};
+
+static int ab8500_sysclkreq_enable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = ab8500_gpio_config_select(info->dev, info->gpio_pin, false);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set sysclkreq pin selection\n");
+ return ret;
+ }
+
+ info->is_enabled = true;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-enable (gpio_pin, gpio_select): %i, false\n",
+ info->desc.name, info->gpio_pin);
+
+ return ret;
+}
+
+static int ab8500_sysclkreq_disable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = ab8500_gpio_config_select(info->dev, info->gpio_pin, true);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set gpio pin selection\n");
+ return ret;
+ }
+
+ info->is_enabled = false;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-disable (gpio_pin, gpio_select): %i, true\n",
+ info->desc.name, info->gpio_pin);
+
+ return ret;
+}
+
+static int ab8500_sysclkreq_is_enabled(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ bool gpio_select;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = ab8500_gpio_config_get_select(info->dev, info->gpio_pin,
+ &gpio_select);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't read gpio pin selection\n");
+ return ret;
+ }
+
+ info->is_enabled = !gpio_select;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-is_enabled (gpio_pin, is_enabled): %i, %i\n",
+ info->desc.name, info->gpio_pin, !gpio_select);
+
+ return info->is_enabled;
+}
+
+static struct regulator_ops ab8500_sysclkreq_ops = {
+ .enable = ab8500_sysclkreq_enable,
+ .disable = ab8500_sysclkreq_disable,
+ .is_enabled = ab8500_sysclkreq_is_enabled,
+ .get_voltage = ab8500_fixed_get_voltage,
+ .list_voltage = ab8500_list_voltage,
+};
+
+/* AB8500 regulator information */
static struct ab8500_regulator_info
ab8500_regulator_info[AB8500_NUM_REGULATORS] = {
/*
@@ -358,7 +566,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX1] = {
.desc = {
.name = "LDO-AUX1",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX1,
.owner = THIS_MODULE,
@@ -366,10 +574,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x1f,
.voltage_mask = 0x0f,
@@ -379,7 +590,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX2] = {
.desc = {
.name = "LDO-AUX2",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX2,
.owner = THIS_MODULE,
@@ -387,10 +598,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x0c,
- .update_val_enable = 0x04,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
.voltage_bank = 0x04,
.voltage_reg = 0x20,
.voltage_mask = 0x0f,
@@ -400,7 +614,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX3] = {
.desc = {
.name = "LDO-AUX3",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX3,
.owner = THIS_MODULE,
@@ -408,10 +622,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x0a,
.update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x21,
.voltage_mask = 0x07,
@@ -421,7 +638,7 @@ static struct ab8500_regulator_info
[AB8500_LDO_INTCORE] = {
.desc = {
.name = "LDO-INTCORE",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_INTCORE,
.owner = THIS_MODULE,
@@ -429,10 +646,13 @@ static struct ab8500_regulator_info
},
.min_uV = 1100000,
.max_uV = 3300000,
+ .load_lp_uA = 5000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x44,
- .update_val_enable = 0x04,
+ .update_val = 0x44,
+ .update_val_idle = 0x44,
+ .update_val_normal = 0x04,
.voltage_bank = 0x03,
.voltage_reg = 0x80,
.voltage_mask = 0x38,
@@ -448,7 +668,275 @@ static struct ab8500_regulator_info
[AB8500_LDO_TVOUT] = {
.desc = {
.name = "LDO-TVOUT",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_TVOUT,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .delay = 500,
+ .fixed_uV = 2000000,
+ .load_lp_uA = 1000,
+ .update_bank = 0x03,
+ .update_reg = 0x80,
+ .update_mask = 0x82,
+ .update_val = 0x02,
+ .update_val_idle = 0x82,
+ .update_val_normal = 0x02,
+ },
+ [AB8500_LDO_AUDIO] = {
+ .desc = {
+ .name = "LDO-AUDIO",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_AUDIO,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 2000000,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x02,
+ .update_val = 0x02,
+ },
+ [AB8500_LDO_ANAMIC1] = {
+ .desc = {
+ .name = "LDO-ANAMIC1",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_ANAMIC1,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 2050000,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x08,
+ .update_val = 0x08,
+ },
+ [AB8500_LDO_ANAMIC2] = {
+ .desc = {
+ .name = "LDO-ANAMIC2",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_ANAMIC2,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 2050000,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x10,
+ .update_val = 0x10,
+ },
+ [AB8500_LDO_DMIC] = {
+ .desc = {
+ .name = "LDO-DMIC",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_DMIC,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1800000,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x04,
+ .update_val = 0x04,
+ },
+
+ /*
+ * Regulators with fixed voltage and normal/idle modes
+ */
+ [AB8500_LDO_ANA] = {
+ .desc = {
+ .name = "LDO-ANA",
+ .ops = &ab8500_regulator_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_ANA,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1200000,
+ .load_lp_uA = 1000,
+ .update_bank = 0x04,
+ .update_reg = 0x06,
+ .update_mask = 0x0c,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
+ },
+
+ /*
+ * SysClkReq regulators
+ */
+ [AB8500_SYSCLKREQ_2] = {
+ .desc = {
+ .name = "SYSCLKREQ-2",
+ .ops = &ab8500_sysclkreq_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_SYSCLKREQ_2,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1, /* bogus value */
+ .gpio_pin = AB8500_PIN_GPIO1,
+ },
+ [AB8500_SYSCLKREQ_4] = {
+ .desc = {
+ .name = "SYSCLKREQ-4",
+ .ops = &ab8500_sysclkreq_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_SYSCLKREQ_4,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1, /* bogus value */
+ .gpio_pin = AB8500_PIN_GPIO3,
+ },
+};
+
+/* AB9540 regulator information */
+static struct ab8500_regulator_info
+ ab9540_regulator_info[AB9540_NUM_REGULATORS] = {
+ /*
+ * Variable Voltage Regulators
+ * name, min uV, max uV,
+ * update bank, reg, mask, enable val
+ * volt bank, reg, mask, table, table length
+ */
+ [AB9540_LDO_AUX1] = {
+ .desc = {
+ .name = "LDO-AUX1",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_AUX1,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ },
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x09,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x1f,
+ .voltage_mask = 0x0f,
+ .voltages = ldo_vauxn_voltages,
+ .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages),
+ },
+ [AB9540_LDO_AUX2] = {
+ .desc = {
+ .name = "LDO-AUX2",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_AUX2,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ },
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x09,
+ .update_mask = 0x0c,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x20,
+ .voltage_mask = 0x0f,
+ .voltages = ldo_vauxn_voltages,
+ .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages),
+ },
+ [AB9540_LDO_AUX3] = {
+ .desc = {
+ .name = "LDO-AUX3",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_AUX3,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vaux3_voltages),
+ },
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x0a,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x21,
+ .voltage_mask = 0x07,
+ .voltages = ldo_vaux3_voltages,
+ .voltages_len = ARRAY_SIZE(ldo_vaux3_voltages),
+ },
+ [AB9540_LDO_AUX4] = {
+ .desc = {
+ .name = "LDO-AUX4",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB9540_LDO_AUX4,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ },
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .load_lp_uA = 5000,
+ /* values for Vaux4Regu register */
+ .update_bank = 0x04,
+ .update_reg = 0x2e,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ /* values for Vaux4SEL register */
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x2f,
+ .voltage_mask = 0x0f,
+ .voltages = ldo_vauxn_voltages,
+ .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages),
+ },
+ [AB9540_LDO_INTCORE] = {
+ .desc = {
+ .name = "LDO-INTCORE",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_INTCORE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
+ },
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .load_lp_uA = 5000,
+ .update_bank = 0x03,
+ .update_reg = 0x80,
+ .update_mask = 0x44,
+ .update_val = 0x44,
+ .update_val_idle = 0x44,
+ .update_val_normal = 0x04,
+ .voltage_bank = 0x03,
+ .voltage_reg = 0x80,
+ .voltage_mask = 0x38,
+ .voltages = ldo_vintcore_voltages,
+ .voltages_len = ARRAY_SIZE(ldo_vintcore_voltages),
+ },
+
+ /*
+ * Fixed Voltage Regulators
+ * name, fixed uV,
+ * update bank, reg, mask, enable val
+ */
+ [AB9540_LDO_TVOUT] = {
+ .desc = {
+ .name = "LDO-TVOUT",
+ .ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_TVOUT,
.owner = THIS_MODULE,
@@ -456,17 +944,20 @@ static struct ab8500_regulator_info
},
.delay = 10000,
.fixed_uV = 2000000,
+ .load_lp_uA = 1000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x82,
- .update_val_enable = 0x02,
+ .update_val = 0x02,
+ .update_val_idle = 0x82,
+ .update_val_normal = 0x02,
},
- [AB8500_LDO_USB] = {
+ [AB9540_LDO_USB] = {
.desc = {
.name = "LDO-USB",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
- .id = AB8500_LDO_USB,
+ .id = AB9540_LDO_USB,
.owner = THIS_MODULE,
.n_voltages = 1,
},
@@ -474,12 +965,14 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x82,
.update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
},
- [AB8500_LDO_AUDIO] = {
+ [AB9540_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUDIO,
.owner = THIS_MODULE,
@@ -489,12 +982,12 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x02,
- .update_val_enable = 0x02,
+ .update_val = 0x02,
},
- [AB8500_LDO_ANAMIC1] = {
+ [AB9540_LDO_ANAMIC1] = {
.desc = {
.name = "LDO-ANAMIC1",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANAMIC1,
.owner = THIS_MODULE,
@@ -504,12 +997,12 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x08,
- .update_val_enable = 0x08,
+ .update_val = 0x08,
},
- [AB8500_LDO_ANAMIC2] = {
+ [AB9540_LDO_ANAMIC2] = {
.desc = {
.name = "LDO-ANAMIC2",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANAMIC2,
.owner = THIS_MODULE,
@@ -519,12 +1012,12 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x10,
- .update_val_enable = 0x10,
+ .update_val = 0x10,
},
- [AB8500_LDO_DMIC] = {
+ [AB9540_LDO_DMIC] = {
.desc = {
.name = "LDO-DMIC",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_DMIC,
.owner = THIS_MODULE,
@@ -534,25 +1027,58 @@ static struct ab8500_regulator_info
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x04,
- .update_val_enable = 0x04,
+ .update_val = 0x04,
},
- [AB8500_LDO_ANA] = {
+
+ /*
+ * Regulators with fixed voltage and normal/idle modes
+ */
+ [AB9540_LDO_ANA] = {
.desc = {
.name = "LDO-ANA",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_ANA,
.owner = THIS_MODULE,
.n_voltages = 1,
},
.fixed_uV = 1200000,
+ .load_lp_uA = 1000,
.update_bank = 0x04,
.update_reg = 0x06,
.update_mask = 0x0c,
- .update_val_enable = 0x04,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
},
-
+ /*
+ * SysClkReq regulators
+ */
+ [AB9540_SYSCLKREQ_2] = {
+ .desc = {
+ .name = "SYSCLKREQ-2",
+ .ops = &ab8500_sysclkreq_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_SYSCLKREQ_2,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1, /* bogus value */
+ .gpio_pin = AB8500_PIN_GPIO1,
+ },
+ [AB9540_SYSCLKREQ_4] = {
+ .desc = {
+ .name = "SYSCLKREQ-4",
+ .ops = &ab8500_sysclkreq_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_SYSCLKREQ_4,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .fixed_uV = 1, /* bogus value */
+ .gpio_pin = AB8500_PIN_GPIO3,
+ },
};
struct ab8500_reg_init {
@@ -568,13 +1094,13 @@ struct ab8500_reg_init {
.mask = _mask, \
}
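
The (id, mask, value) triples consumed by the probe loop further below come from platform data. A board-file sketch, assuming the struct ab8500_regulator_reg_init layout implied by the probe code (id, mask and value fields; the mask field is introduced by this patch):

	static struct ab8500_regulator_reg_init ab8500_reg_init_data[] = {
		{
			/* VanaRequestCtrl: only touch bits inside the table mask */
			.id = AB8500_REGUREQUESTCTRL2,
			.mask = 0x30,
			.value = 0x00,
		},
	};
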
+/* AB8500 register init */
static struct ab8500_reg_init ab8500_reg_init[] = {
/*
* 0x30, VanaRequestCtrl
- * 0x0C, VpllRequestCtrl
* 0xc0, VextSupply1RequestCtrl
*/
- REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xfc),
+ REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xf0),
/*
* 0x03, VextSupply2RequestCtrl
* 0x0c, VextSupply3RequestCtrl
@@ -641,13 +1167,21 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
REG_INIT(AB8500_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f),
/*
* 0x02, SysClkReq2Valid1
- * ...
+ * 0x04, SysClkReq3Valid1
+ * 0x08, SysClkReq4Valid1
+ * 0x10, SysClkReq5Valid1
+ * 0x20, SysClkReq6Valid1
+ * 0x40, SysClkReq7Valid1
* 0x80, SysClkReq8Valid1
*/
REG_INIT(AB8500_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe),
/*
* 0x02, SysClkReq2Valid2
- * ...
+ * 0x04, SysClkReq3Valid2
+ * 0x08, SysClkReq4Valid2
+ * 0x10, SysClkReq5Valid2
+ * 0x20, SysClkReq6Valid2
+ * 0x40, SysClkReq7Valid2
* 0x80, SysClkReq8Valid2
*/
REG_INIT(AB8500_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe),
@@ -672,8 +1206,8 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
*/
REG_INIT(AB8500_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
/*
+ * 0x03, VpllRegu (NOTE! PRCMU register bits)
* 0x0c, VanaRegu
- * 0x03, VpllRegu
*/
REG_INIT(AB8500_VPLLVANAREGU, 0x04, 0x06, 0x0f),
/*
@@ -699,10 +1233,6 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
*/
REG_INIT(AB8500_VRF1VAUX3REGU, 0x04, 0x0a, 0x03),
/*
- * 0x3f, Vsmps1Sel1
- */
- REG_INIT(AB8500_VSMPS1SEL1, 0x04, 0x13, 0x3f),
- /*
* 0x0f, Vaux1Sel
*/
REG_INIT(AB8500_VAUX1SEL, 0x04, 0x1f, 0x0f),
@@ -735,79 +1265,412 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x16),
};
+/* Weak stubs so that an optional debug module can override these at link time */
+int __attribute__((weak)) ab8500_regulator_debug_init(
+ struct platform_device *pdev)
+{
+ return 0;
+}
+
+int __attribute__((weak)) ab8500_regulator_debug_exit(
+ struct platform_device *pdev)
+{
+ return 0;
+}
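
Because the stubs above are weak, a debug build can supply strong definitions in a separate object file and the linker will pick those instead; no Kconfig plumbing for that is shown in this patch. A sketch of such an override:

	/* in an optional debug file, linked into the same image */
	int ab8500_regulator_debug_init(struct platform_device *pdev)
	{
		/* record initial register state, create debugfs entries, ... */
		return 0;
	}
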
+
+/* AB9540 register init */
+static struct ab8500_reg_init ab9540_reg_init[] = {
+ /*
+ * 0x03, VarmRequestCtrl
+ * 0x0c, VapeRequestCtrl
+ * 0x30, Vsmps1RequestCtrl
+ * 0xc0, Vsmps2RequestCtrl
+ */
+ REG_INIT(AB9540_REGUREQUESTCTRL1, 0x03, 0x03, 0xff),
+ /*
+ * 0x03, Vsmps3RequestCtrl
+ * 0x0c, VpllRequestCtrl
+ * 0x30, VanaRequestCtrl
+ * 0xc0, VextSupply1RequestCtrl
+ */
+ REG_INIT(AB9540_REGUREQUESTCTRL2, 0x03, 0x04, 0xff),
+ /*
+ * 0x03, VextSupply2RequestCtrl
+ * 0x0c, VextSupply3RequestCtrl
+ * 0x30, Vaux1RequestCtrl
+ * 0xc0, Vaux2RequestCtrl
+ */
+ REG_INIT(AB9540_REGUREQUESTCTRL3, 0x03, 0x05, 0xff),
+ /*
+ * 0x03, Vaux3RequestCtrl
+ * 0x04, SwHPReq
+ */
+ REG_INIT(AB9540_REGUREQUESTCTRL4, 0x03, 0x06, 0x07),
+ /*
+ * 0x01, Vsmps1SysClkReq1HPValid
+ * 0x02, Vsmps2SysClkReq1HPValid
+ * 0x04, Vsmps3SysClkReq1HPValid
+ * 0x08, VanaSysClkReq1HPValid
+ * 0x10, VpllSysClkReq1HPValid
+ * 0x20, Vaux1SysClkReq1HPValid
+ * 0x40, Vaux2SysClkReq1HPValid
+ * 0x80, Vaux3SysClkReq1HPValid
+ */
+ REG_INIT(AB9540_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xff),
+ /*
+ * 0x01, VapeSysClkReq1HPValid
+ * 0x02, VarmSysClkReq1HPValid
+ * 0x04, VbbSysClkReq1HPValid
+ * 0x08, VmodSysClkReq1HPValid
+ * 0x10, VextSupply1SysClkReq1HPValid
+ * 0x20, VextSupply2SysClkReq1HPValid
+ * 0x40, VextSupply3SysClkReq1HPValid
+ */
+ REG_INIT(AB9540_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x7f),
+ /*
+ * 0x01, Vsmps1HwHPReq1Valid
+ * 0x02, Vsmps2HwHPReq1Valid
+ * 0x04, Vsmps3HwHPReq1Valid
+ * 0x08, VanaHwHPReq1Valid
+ * 0x10, VpllHwHPReq1Valid
+ * 0x20, Vaux1HwHPReq1Valid
+ * 0x40, Vaux2HwHPReq1Valid
+ * 0x80, Vaux3HwHPReq1Valid
+ */
+ REG_INIT(AB9540_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xff),
+ /*
+ * 0x01, VextSupply1HwHPReq1Valid
+ * 0x02, VextSupply2HwHPReq1Valid
+ * 0x04, VextSupply3HwHPReq1Valid
+ * 0x08, VmodHwHPReq1Valid
+ */
+ REG_INIT(AB9540_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x0f),
+ /*
+ * 0x01, Vsmps1HwHPReq2Valid
+ * 0x02, Vsmps2HwHPReq2Valid
+ * 0x04, Vsmps3HwHPReq2Valid
+ * 0x08, VanaHwHPReq2Valid
+ * 0x10, VpllHwHPReq2Valid
+ * 0x20, Vaux1HwHPReq2Valid
+ * 0x40, Vaux2HwHPReq2Valid
+ * 0x80, Vaux3HwHPReq2Valid
+ */
+ REG_INIT(AB9540_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xff),
+ /*
+ * 0x01, VextSupply1HwHPReq2Valid
+ * 0x02, VextSupply2HwHPReq2Valid
+ * 0x04, VextSupply3HwHPReq2Valid
+ * 0x08, VmodHwHPReq2Valid
+ */
+ REG_INIT(AB9540_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x0f),
+ /*
+ * 0x01, VapeSwHPReqValid
+ * 0x02, VarmSwHPReqValid
+ * 0x04, Vsmps1SwHPReqValid
+ * 0x08, Vsmps2SwHPReqValid
+ * 0x10, Vsmps3SwHPReqValid
+ * 0x20, VanaSwHPReqValid
+ * 0x40, VpllSwHPReqValid
+ * 0x80, Vaux1SwHPReqValid
+ */
+ REG_INIT(AB9540_REGUSWHPREQVALID1, 0x03, 0x0d, 0xff),
+ /*
+ * 0x01, Vaux2SwHPReqValid
+ * 0x02, Vaux3SwHPReqValid
+ * 0x04, VextSupply1SwHPReqValid
+ * 0x08, VextSupply2SwHPReqValid
+ * 0x10, VextSupply3SwHPReqValid
+ * 0x20, VmodSwHPReqValid
+ */
+ REG_INIT(AB9540_REGUSWHPREQVALID2, 0x03, 0x0e, 0x3f),
+ /*
+ * 0x02, SysClkReq2Valid1
+ * ...
+ * 0x80, SysClkReq8Valid1
+ */
+ REG_INIT(AB9540_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe),
+ /*
+ * 0x02, SysClkReq2Valid2
+ * ...
+ * 0x80, SysClkReq8Valid2
+ */
+ REG_INIT(AB9540_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe),
+ /*
+ * 0x01, Vaux4SwHPReqValid
+ * 0x02, Vaux4HwHPReq2Valid
+ * 0x04, Vaux4HwHPReq1Valid
+ * 0x08, Vaux4SysClkReq1HPValid
+ */
+ REG_INIT(AB9540_REGUVAUX4REQVALID, 0x03, 0x11, 0x0f),
+ /*
+ * 0x02, VTVoutEna
+ * 0x04, Vintcore12Ena
+ * 0x38, Vintcore12Sel
+ * 0x40, Vintcore12LP
+ * 0x80, VTVoutLP
+ */
+ REG_INIT(AB9540_REGUMISC1, 0x03, 0x80, 0xfe),
+ /*
+ * 0x02, VaudioEna
+ * 0x04, VdmicEna
+ * 0x08, Vamic1Ena
+ * 0x10, Vamic2Ena
+ */
+ REG_INIT(AB9540_VAUDIOSUPPLY, 0x03, 0x83, 0x1e),
+ /*
+ * 0x01, Vamic1_dzout
+ * 0x02, Vamic2_dzout
+ */
+ REG_INIT(AB9540_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
+ /*
+ * 0x03, Vsmps1Regu
+ * 0x0c, Vsmps1SelCtrl
+ * 0x10, Vsmps1AutoMode
+ * 0x20, Vsmps1PWMMode
+ */
+ REG_INIT(AB9540_VSMPS1REGU, 0x04, 0x03, 0x3f),
+ /*
+ * 0x03, Vsmps2Regu
+ * 0x0c, Vsmps2SelCtrl
+ * 0x10, Vsmps2AutoMode
+ * 0x20, Vsmps2PWMMode
+ */
+ REG_INIT(AB9540_VSMPS2REGU, 0x04, 0x04, 0x3f),
+ /*
+ * 0x03, Vsmps3Regu
+ * 0x0c, Vsmps3SelCtrl
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB9540_VSMPS3REGU, 0x04, 0x05, 0x0f),
+ /*
+ * 0x03, VpllRegu
+ * 0x0c, VanaRegu
+ */
+ REG_INIT(AB9540_VPLLVANAREGU, 0x04, 0x06, 0x0f),
+ /*
+ * 0x03, VextSupply1Regu
+ * 0x0c, VextSupply2Regu
+ * 0x30, VextSupply3Regu
+ * 0x40, ExtSupply2Bypass
+ * 0x80, ExtSupply3Bypass
+ */
+ REG_INIT(AB9540_EXTSUPPLYREGU, 0x04, 0x08, 0xff),
+ /*
+ * 0x03, Vaux1Regu
+ * 0x0c, Vaux2Regu
+ */
+ REG_INIT(AB9540_VAUX12REGU, 0x04, 0x09, 0x0f),
+ /*
+ * 0x0c, Vrf1Regu
+ * 0x03, Vaux3Regu
+ */
+ REG_INIT(AB9540_VRF1VAUX3REGU, 0x04, 0x0a, 0x0f),
+ /*
+ * 0x3f, Vsmps1Sel1
+ */
+ REG_INIT(AB9540_VSMPS1SEL1, 0x04, 0x13, 0x3f),
+ /*
+ * 0x3f, Vsmps1Sel2
+ */
+ REG_INIT(AB9540_VSMPS1SEL2, 0x04, 0x14, 0x3f),
+ /*
+ * 0x3f, Vsmps1Sel3
+ */
+ REG_INIT(AB9540_VSMPS1SEL3, 0x04, 0x15, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel1
+ */
+ REG_INIT(AB9540_VSMPS2SEL1, 0x04, 0x17, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel2
+ */
+ REG_INIT(AB9540_VSMPS2SEL2, 0x04, 0x18, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel3
+ */
+ REG_INIT(AB9540_VSMPS2SEL3, 0x04, 0x19, 0x3f),
+ /*
+ * 0x7f, Vsmps3Sel1
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB9540_VSMPS3SEL1, 0x04, 0x1b, 0x7f),
+ /*
+ * 0x7f, Vsmps3Sel2
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB9540_VSMPS3SEL2, 0x04, 0x1c, 0x7f),
+ /*
+ * 0x0f, Vaux1Sel
+ */
+ REG_INIT(AB9540_VAUX1SEL, 0x04, 0x1f, 0x0f),
+ /*
+ * 0x0f, Vaux2Sel
+ */
+ REG_INIT(AB9540_VAUX2SEL, 0x04, 0x20, 0x0f),
+ /*
+ * 0x07, Vaux3Sel
+ * 0x30, Vrf1Sel
+ */
+ REG_INIT(AB9540_VRF1VAUX3SEL, 0x04, 0x21, 0x37),
+ /*
+ * 0x01, VextSupply12LP
+ */
+ REG_INIT(AB9540_REGUCTRL2SPARE, 0x04, 0x22, 0x01),
+ /*
+ * 0x03, Vaux4RequestCtrl
+ */
+ REG_INIT(AB9540_VAUX4REQCTRL, 0x04, 0x2d, 0x03),
+ /*
+ * 0x03, Vaux4Regu
+ */
+ REG_INIT(AB9540_VAUX4REGU, 0x04, 0x2e, 0x03),
+ /*
+ * 0x08, Vaux4Sel
+ */
+ REG_INIT(AB9540_VAUX4SEL, 0x04, 0x2f, 0x0f),
+ /*
+ * 0x01, VpllDisch
+ * 0x02, Vrf1Disch
+ * 0x04, Vaux1Disch
+ * 0x08, Vaux2Disch
+ * 0x10, Vaux3Disch
+ * 0x20, Vintcore12Disch
+ * 0x40, VTVoutDisch
+ * 0x80, VaudioDisch
+ */
+ REG_INIT(AB9540_REGUCTRLDISCH, 0x04, 0x43, 0xff),
+ /*
+ * 0x01, VsimDisch
+ * 0x02, VanaDisch
+ * 0x04, VdmicPullDownEna
+ * 0x08, VpllPullDownEna
+ * 0x10, VdmicDisch
+ */
+ REG_INIT(AB9540_REGUCTRLDISCH2, 0x04, 0x44, 0x1f),
+ /*
+ * 0x01, Vaux4Disch
+ */
+ REG_INIT(AB9540_REGUCTRLDISCH3, 0x04, 0x48, 0x01),
+};
+
static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_platform_data *pdata;
+ struct ab8500_platform_data *ppdata;
+ struct ab8500_regulator_platform_data *pdata;
int i, err;
+ struct ab8500_regulator_info *regulator_info;
+ int regulator_info_size;
+ struct ab8500_reg_init *reg_init;
+ int reg_init_size;
+ /* cache values needed repeatedly inside for-loops */
if (!ab8500) {
dev_err(&pdev->dev, "null mfd parent\n");
return -EINVAL;
}
- pdata = dev_get_platdata(ab8500->dev);
+
+ ppdata = dev_get_platdata(ab8500->dev);
+ if (!ppdata) {
+ dev_err(&pdev->dev, "null parent pdata\n");
+ return -EINVAL;
+ }
+
+ pdata = ppdata->regulator;
if (!pdata) {
dev_err(&pdev->dev, "null pdata\n");
return -EINVAL;
}
+ if (is_ab9540(ab8500)) {
+ regulator_info = ab9540_regulator_info;
+ regulator_info_size = ARRAY_SIZE(ab9540_regulator_info);
+ reg_init = ab9540_reg_init;
+ reg_init_size = AB9540_NUM_REGULATOR_REGISTERS;
+ } else {
+ regulator_info = ab8500_regulator_info;
+ regulator_info_size = ARRAY_SIZE(ab8500_regulator_info);
+ reg_init = ab8500_reg_init;
+ reg_init_size = AB8500_NUM_REGULATOR_REGISTERS;
+ }
+
/* make sure the platform data has the correct size */
- if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) {
+ if (pdata->num_regulator != regulator_info_size) {
dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
return -EINVAL;
}
+ /* initialize debug (initial state is recorded with this call) */
+ err = ab8500_regulator_debug_init(pdev);
+ if (err)
+ return err;
+
/* initialize registers */
- for (i = 0; i < pdata->num_regulator_reg_init; i++) {
+ for (i = 0; i < pdata->num_reg_init; i++) {
int id;
- u8 value;
+ u8 mask, value;
- id = pdata->regulator_reg_init[i].id;
- value = pdata->regulator_reg_init[i].value;
+ id = pdata->reg_init[i].id;
+ mask = pdata->reg_init[i].mask;
+ value = pdata->reg_init[i].value;
/* check for configuration errors */
- if (id >= AB8500_NUM_REGULATOR_REGISTERS) {
- dev_err(&pdev->dev,
- "Configuration error: id outside range.\n");
- return -EINVAL;
- }
- if (value & ~ab8500_reg_init[id].mask) {
- dev_err(&pdev->dev,
- "Configuration error: value outside mask.\n");
- return -EINVAL;
- }
+ BUG_ON(id >= reg_init_size);
+ BUG_ON(value & ~mask);
+ BUG_ON(mask & ~reg_init[id].mask);
/* initialize register */
err = abx500_mask_and_set_register_interruptible(&pdev->dev,
- ab8500_reg_init[id].bank,
- ab8500_reg_init[id].addr,
- ab8500_reg_init[id].mask,
- value);
+ reg_init[id].bank,
+ reg_init[id].addr,
+ mask, value);
if (err < 0) {
dev_err(&pdev->dev,
"Failed to initialize 0x%02x, 0x%02x.\n",
- ab8500_reg_init[id].bank,
- ab8500_reg_init[id].addr);
+ reg_init[id].bank,
+ reg_init[id].addr);
return err;
}
dev_vdbg(&pdev->dev,
" init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
- ab8500_reg_init[id].bank,
- ab8500_reg_init[id].addr,
- ab8500_reg_init[id].mask,
- value);
+ reg_init[id].bank,
+ reg_init[id].addr,
+ mask, value);
+ }
+
+ /*
+ * This changes the default setting for VextSupply3Regu to low power.
+ * Whether the request signal is active high or low depends on OTP,
+ * which changed from AB8500 v3.0. This only affects power consumption
+ * on HREF boards and can be removed once AB8500 v2.0 is no longer
+ * important.
+ */
+ if (is_ab8500_2p0_or_earlier(ab8500)) {
+ err = abx500_mask_and_set_register_interruptible(&pdev->dev,
+ AB8500_REGU_CTRL2, 0x08, 0x30, 0x30);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Failed to override 0x%02x, 0x%02x.\n",
+ AB8500_REGU_CTRL2, 0x08);
+ return err;
+ }
}
+ /* register external regulators (before Vaux1, 2 and 3) */
+ err = ab8500_ext_regulator_init(pdev);
+ if (err)
+ return err;
+
/* register all regulators */
- for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
+ for (i = 0; i < regulator_info_size; i++) {
struct ab8500_regulator_info *info = NULL;
/* assign per-regulator data */
- info = &ab8500_regulator_info[i];
+ info = &regulator_info[i];
info->dev = &pdev->dev;
/* fix for hardware before ab8500v2.0 */
- if (abx500_get_chip_id(info->dev) < 0x20) {
+ if (is_ab8500_1p1_or_earlier(ab8500)) {
if (info->desc.id == AB8500_LDO_AUX3) {
info->desc.n_voltages =
ARRAY_SIZE(ldo_vauxn_voltages);
@@ -827,7 +1690,7 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
info->desc.name);
/* when we fail, un-register all earlier regulators */
while (--i >= 0) {
- info = &ab8500_regulator_info[i];
+ info = &regulator_info[i];
regulator_unregister(info->regulator);
}
return err;
@@ -842,11 +1705,23 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
static __devexit int ab8500_regulator_remove(struct platform_device *pdev)
{
- int i;
+ int i, err;
+ struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
+ struct ab8500_regulator_info *regulator_info;
+ int regulator_info_size;
+
- for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
+ if (is_ab9540(ab8500)) {
+ regulator_info = ab9540_regulator_info;
+ regulator_info_size = ARRAY_SIZE(ab9540_regulator_info);
+ } else {
+ regulator_info = ab8500_regulator_info;
+ regulator_info_size = ARRAY_SIZE(ab8500_regulator_info);
+ }
+
+ for (i = 0; i < regulator_info_size; i++) {
struct ab8500_regulator_info *info = NULL;
- info = &ab8500_regulator_info[i];
+ info = &regulator_info[i];
dev_vdbg(rdev_get_dev(info->regulator),
"%s-remove\n", info->desc.name);
@@ -854,6 +1729,16 @@ static __devexit int ab8500_regulator_remove(struct platform_device *pdev)
regulator_unregister(info->regulator);
}
+ /* remove external regulators (after Vaux1, 2 and 3) */
+ err = ab8500_ext_regulator_exit(pdev);
+ if (err)
+ return err;
+
+ /* remove regulator debug */
+ err = ab8500_regulator_debug_exit(pdev);
+ if (err)
+ return err;
+
return 0;
}
@@ -886,5 +1771,6 @@ module_exit(ab8500_regulator_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>");
MODULE_DESCRIPTION("Regulator Driver for ST-Ericsson AB8500 Mixed-Sig PMIC");
MODULE_ALIAS("platform:ab8500-regulator");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 046fb1bd861..bc6c12c8a76 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -81,6 +81,7 @@ struct regulator {
struct device_attribute dev_attr;
struct regulator_dev *rdev;
struct dentry *debugfs;
+ int use;
};
static int _regulator_is_enabled(struct regulator_dev *rdev);
@@ -199,11 +200,13 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
*/
if (!regulator->min_uV && !regulator->max_uV)
continue;
-
- if (*max_uV > regulator->max_uV)
- *max_uV = regulator->max_uV;
- if (*min_uV < regulator->min_uV)
- *min_uV = regulator->min_uV;
+
+ if (regulator->use) {
+ if (*max_uV > regulator->max_uV)
+ *max_uV = regulator->max_uV;
+ if (*min_uV < regulator->min_uV)
+ *min_uV = regulator->min_uV;
+ }
}
if (*min_uV > *max_uV)
@@ -602,6 +605,32 @@ static ssize_t regulator_suspend_standby_state_show(struct device *dev,
static DEVICE_ATTR(suspend_standby_state, 0444,
regulator_suspend_standby_state_show, NULL);
+static ssize_t regulator_use_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+ struct regulator *reg;
+ size_t size = 0;
+
+ if (rdev->use_count == 0)
+ return sprintf(buf, "no users\n");
+
+ list_for_each_entry(reg, &rdev->consumer_list, list) {
+ if (!reg->use)
+ continue;
+
+ if (reg->dev != NULL)
+ size += sprintf((buf + size), "%s (%d) ",
+ dev_name(reg->dev), reg->use);
+ else
+ size += sprintf((buf + size), "unknown (%d) ",
+ reg->use);
+ }
+ size += sprintf((buf + size), "\n");
+
+ return size;
+}
+static DEVICE_ATTR(use, 0444, regulator_use_show, NULL);
/*
* These are the only attributes present for all regulators.
@@ -1491,12 +1520,8 @@ static int _regulator_enable(struct regulator_dev *rdev)
trace_regulator_enable_delay(rdev_get_name(rdev));
- if (delay >= 1000) {
- mdelay(delay / 1000);
- udelay(delay % 1000);
- } else if (delay) {
- udelay(delay);
- }
+ if (delay)
+ usleep_range(delay, delay);
trace_regulator_enable_complete(rdev_get_name(rdev));
@@ -1540,6 +1565,8 @@ int regulator_enable(struct regulator *regulator)
if (ret != 0 && rdev->supply)
regulator_disable(rdev->supply);
+
+ if (ret == 0)
+ regulator->use++;
return ret;
}
@@ -1613,6 +1640,9 @@ int regulator_disable(struct regulator *regulator)
if (ret == 0 && rdev->supply)
regulator_disable(rdev->supply);
+ if (ret == 0)
+ regulator->use--;
+
return ret;
}
EXPORT_SYMBOL_GPL(regulator_disable);
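
With the hunks above, each struct regulator now tracks its own balanced enable count in the new use field, which the "use" sysfs attribute added in this patch prints per consumer. A hypothetical consumer sequence and the resulting count:

	struct regulator *reg;
	int ret;

	reg = regulator_get(dev, "v-main");	/* supply name is hypothetical */
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	ret = regulator_enable(reg);		/* this consumer's use -> 1 */
	if (!ret)
		ret = regulator_enable(reg);	/* use -> 2 */
	if (!ret)
		regulator_disable(reg);		/* use -> 1 */
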
@@ -2699,6 +2729,10 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
struct regulator_ops *ops = rdev->desc->ops;
int status = 0;
+ status = device_create_file(dev, &dev_attr_use);
+ if (status < 0)
+ dev_warn(dev, "Create sysfs file \"use\" failed");
+
/* some attributes need specific methods to be displayed */
if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
(ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0)) {
diff --git a/drivers/regulator/db5500-prcmu.c b/drivers/regulator/db5500-prcmu.c
new file mode 100644
index 00000000000..189362ab8e0
--- /dev/null
+++ b/drivers/regulator/db5500-prcmu.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ *
+ * Power domain regulators on DB5500
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/db5500-prcmu.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include "dbx500-prcmu.h"
+static int db5500_regulator_enable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-%s-enable\n",
+ info->desc.name);
+
+ if (!info->is_enabled) {
+ info->is_enabled = true;
+ if (!info->exclude_from_power_state)
+ power_state_active_enable();
+ }
+
+ return 0;
+}
+
+static int db5500_regulator_disable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret = 0;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-%s-disable\n",
+ info->desc.name);
+
+ if (info->is_enabled) {
+ info->is_enabled = false;
+ if (!info->exclude_from_power_state)
+ ret = power_state_active_disable();
+ }
+
+ return ret;
+}
+
+static int db5500_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-%s-is_enabled (is_enabled):"
+ " %i\n", info->desc.name, info->is_enabled);
+
+ return info->is_enabled;
+}
+
+/* db5500 regulator operations */
+static struct regulator_ops db5500_regulator_ops = {
+ .enable = db5500_regulator_enable,
+ .disable = db5500_regulator_disable,
+ .is_enabled = db5500_regulator_is_enabled,
+};
+
+/*
+ * EPOD control
+ */
+static bool epod_on[NUM_EPOD_ID];
+static bool epod_ramret[NUM_EPOD_ID];
+
+static inline int epod_id_to_index(u16 epod_id)
+{
+ return epod_id - DB5500_EPOD_ID_BASE;
+}
+
+static int enable_epod(u16 epod_id, bool ramret)
+{
+ int idx = epod_id_to_index(epod_id);
+ int ret;
+
+ if (ramret) {
+ if (!epod_on[idx]) {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
+ if (ret < 0)
+ return ret;
+ }
+ epod_ramret[idx] = true;
+ } else {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_ON);
+ if (ret < 0)
+ return ret;
+ epod_on[idx] = true;
+ }
+
+ return 0;
+}
+
+static int disable_epod(u16 epod_id, bool ramret)
+{
+ int idx = epod_id_to_index(epod_id);
+ int ret;
+
+ if (ramret) {
+ if (!epod_on[idx]) {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
+ if (ret < 0)
+ return ret;
+ }
+ epod_ramret[idx] = false;
+ } else {
+ if (epod_ramret[idx]) {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
+ if (ret < 0)
+ return ret;
+ }
+ epod_on[idx] = false;
+ }
+
+ return 0;
+}
+
+/*
+ * Regulator switch
+ */
+static int db5500_regulator_switch_enable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-enable\n",
+ info->desc.name);
+
+ ret = enable_epod(info->epod_id, info->is_ramret);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "regulator-switch-%s-enable: prcmu call failed\n",
+ info->desc.name);
+ goto out;
+ }
+
+ info->is_enabled = true;
+out:
+ return ret;
+}
+
+static int db5500_regulator_switch_disable(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-disable\n",
+ info->desc.name);
+
+ ret = disable_epod(info->epod_id, info->is_ramret);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "regulator_switch-%s-disable: prcmu call failed\n",
+ info->desc.name);
+ goto out;
+ }
+
+ info->is_enabled = false;
+out:
+ return ret;
+}
+
+static int db5500_regulator_switch_is_enabled(struct regulator_dev *rdev)
+{
+ struct dbx500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL)
+ return -EINVAL;
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "regulator-switch-%s-is_enabled (is_enabled): %i\n",
+ info->desc.name, info->is_enabled);
+
+ return info->is_enabled;
+}
+
+static struct regulator_ops db5500_regulator_switch_ops = {
+ .enable = db5500_regulator_switch_enable,
+ .disable = db5500_regulator_switch_disable,
+ .is_enabled = db5500_regulator_switch_is_enabled,
+};
+
+/*
+ * Regulator information
+ */
+#define DB5500_REGULATOR_SWITCH(_name, reg) \
+ [DB5500_REGULATOR_SWITCH_##reg] = { \
+ .desc = { \
+ .name = _name, \
+ .id = DB5500_REGULATOR_SWITCH_##reg, \
+ .ops = &db5500_regulator_switch_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ .epod_id = DB5500_EPOD_ID_##reg, \
+}
+
+static struct dbx500_regulator_info
+ dbx500_regulator_info[DB5500_NUM_REGULATORS] = {
+ [DB5500_REGULATOR_VAPE] = {
+ .desc = {
+ .name = "db5500-vape",
+ .id = DB5500_REGULATOR_VAPE,
+ .ops = &db5500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ },
+ },
+ DB5500_REGULATOR_SWITCH("db5500-sga", SGA),
+ DB5500_REGULATOR_SWITCH("db5500-hva", HVA),
+ DB5500_REGULATOR_SWITCH("db5500-sia", SIA),
+ DB5500_REGULATOR_SWITCH("db5500-disp", DISP),
+ DB5500_REGULATOR_SWITCH("db5500-esram12", ESRAM12),
+};
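
db5500_regulator_probe() below expects its platform data to be an array of struct regulator_init_data indexed exactly like this table. A board-file sketch (constraint values are illustrative):

	static struct regulator_init_data db5500_regulators[DB5500_NUM_REGULATORS] = {
		[DB5500_REGULATOR_VAPE] = {
			.constraints = {
				.name = "db5500-vape",
				.valid_ops_mask = REGULATOR_CHANGE_STATUS,
			},
		},
		/* ... one entry per DB5500_REGULATOR_* index ... */
	};

	static struct platform_device db5500_regulator_device = {
		.name = "db5500-prcmu-regulators",
		.dev = {
			.platform_data = db5500_regulators,
		},
	};
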
+
+static int __devinit db5500_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_init_data *db5500_init_data =
+ dev_get_platdata(&pdev->dev);
+ int i, err;
+
+ /* register all regulators */
+ for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+ struct dbx500_regulator_info *info;
+ struct regulator_init_data *init_data = &db5500_init_data[i];
+
+ /* assign per-regulator data */
+ info = &dbx500_regulator_info[i];
+ info->dev = &pdev->dev;
+
+ /* register with the regulator framework */
+ info->rdev = regulator_register(&info->desc, &pdev->dev,
+ init_data, info);
+ if (IS_ERR(info->rdev)) {
+ err = PTR_ERR(info->rdev);
+ dev_err(&pdev->dev, "failed to register %s: err %i\n",
+ info->desc.name, err);
+
+ /* if failing, unregister all earlier regulators */
+ while (--i >= 0) {
+ info = &dbx500_regulator_info[i];
+ regulator_unregister(info->rdev);
+ }
+ return err;
+ }
+
+ dev_dbg(rdev_get_dev(info->rdev),
+ "regulator-%s-probed\n", info->desc.name);
+ }
+
+ return 0;
+}
+
+static int __exit db5500_regulator_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+ struct dbx500_regulator_info *info;
+ info = &dbx500_regulator_info[i];
+
+ dev_vdbg(rdev_get_dev(info->rdev),
+ "regulator-%s-remove\n", info->desc.name);
+
+ regulator_unregister(info->rdev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver db5500_regulator_driver = {
+ .driver = {
+ .name = "db5500-prcmu-regulators",
+ .owner = THIS_MODULE,
+ },
+ .probe = db5500_regulator_probe,
+ .remove = __exit_p(db5500_regulator_remove),
+};
+
+static int __init db5500_regulator_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&db5500_regulator_driver);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void __exit db5500_regulator_exit(void)
+{
+ platform_driver_unregister(&db5500_regulator_driver);
+}
+
+arch_initcall(db5500_regulator_init);
+module_exit(db5500_regulator_exit);
+
+MODULE_AUTHOR("STMicroelectronics/ST-Ericsson");
+MODULE_DESCRIPTION("DB5500 regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
index f2e5ecdc586..bee4f7be93b 100644
--- a/drivers/regulator/dbx500-prcmu.c
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -9,6 +9,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/err.h>
#include <linux/regulator/driver.h>
#include <linux/debugfs.h>
@@ -62,12 +63,105 @@ out:
return ret;
}
+struct ux500_regulator {
+ char *name;
+ void (*enable)(void);
+ int (*disable)(void);
+ int count;
+};
+
+static struct ux500_regulator ux500_atomic_regulators[] = {
+ {
+ .name = "dma40.0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "ssp0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "ssp1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi2",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi3",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "cryp1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "hash1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+};
+
+struct ux500_regulator *__must_check ux500_regulator_get(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ux500_atomic_regulators); i++) {
+ if (!strcmp(dev_name(dev), ux500_atomic_regulators[i].name))
+ return &ux500_atomic_regulators[i];
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_get);
+
+int ux500_regulator_atomic_enable(struct ux500_regulator *regulator)
+{
+ if (regulator) {
+ regulator->count++;
+ regulator->enable();
+ return 0;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_atomic_enable);
+
+int ux500_regulator_atomic_disable(struct ux500_regulator *regulator)
+{
+ if (regulator) {
+ regulator->count--;
+ return regulator->disable();
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_atomic_disable);
+
+void ux500_regulator_put(struct ux500_regulator *regulator)
+{
+ /* Here for symmetry reasons and for possible future use */
+}
+EXPORT_SYMBOL_GPL(ux500_regulator_put);
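
Unlike the regular regulator API, these helpers avoid the sleeping regulator core and are intended for atomic contexts, as the _atomic_ naming suggests. A minimal consumer sketch for one of the drivers listed in ux500_atomic_regulators:

	struct ux500_regulator *ureg;

	ureg = ux500_regulator_get(&pdev->dev);	/* matches on dev_name(), e.g. "ssp0" */
	if (IS_ERR(ureg))
		return PTR_ERR(ureg);

	ux500_regulator_atomic_enable(ureg);
	/* ... do the work that needs the power state active ... */
	ux500_regulator_atomic_disable(ureg);
	ux500_regulator_put(ureg);
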
+
#ifdef CONFIG_REGULATOR_DEBUG
static struct ux500_regulator_debug {
struct dentry *dir;
- struct dentry *status_file;
- struct dentry *power_state_cnt_file;
struct dbx500_regulator_info *regulator_array;
int num_regulators;
u8 *state_before_suspend;
@@ -119,6 +213,35 @@ static const struct file_operations ux500_regulator_power_state_cnt_fops = {
.owner = THIS_MODULE,
};
+static int ux500_regulator_power_state_use_print(struct seq_file *s, void *p)
+{
+ int i;
+
+ seq_printf(s, "\nPower state usage:\n\n");
+
+ for (i = 0; i < ARRAY_SIZE(ux500_atomic_regulators); i++) {
+ seq_printf(s, "%s\t : %d\n",
+ ux500_atomic_regulators[i].name,
+ ux500_atomic_regulators[i].count);
+ }
+ return 0;
+}
+
+static int ux500_regulator_power_state_use_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ux500_regulator_power_state_use_print,
+ inode->i_private);
+}
+
+static const struct file_operations ux500_regulator_power_state_use_fops = {
+ .open = ux500_regulator_power_state_use_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static int ux500_regulator_status_print(struct seq_file *s, void *p)
{
struct device *dev = s->private;
@@ -180,22 +303,27 @@ ux500_regulator_debug_init(struct platform_device *pdev,
{
/* create directory */
rdebug.dir = debugfs_create_dir("ux500-regulator", NULL);
- if (!rdebug.dir)
+ if (IS_ERR_OR_NULL(rdebug.dir))
goto exit_no_debugfs;
/* create "status" file */
- rdebug.status_file = debugfs_create_file("status",
- S_IRUGO, rdebug.dir, &pdev->dev,
- &ux500_regulator_status_fops);
- if (!rdebug.status_file)
- goto exit_destroy_dir;
+ if (IS_ERR_OR_NULL(debugfs_create_file("status",
+ S_IRUGO, rdebug.dir, &pdev->dev,
+ &ux500_regulator_status_fops)))
+ goto exit_fail;
+
+ /* create "power-state-count" file */
+ if (IS_ERR_OR_NULL(debugfs_create_file("power-state-count",
+ S_IRUGO, rdebug.dir, &pdev->dev,
+ &ux500_regulator_power_state_cnt_fops)))
+ goto exit_fail;
-	/* create "power-state-count" file */
+	/* create "power-state-usage" file */
- rdebug.power_state_cnt_file = debugfs_create_file("power-state-count",
- S_IRUGO, rdebug.dir, &pdev->dev,
- &ux500_regulator_power_state_cnt_fops);
- if (!rdebug.power_state_cnt_file)
- goto exit_destroy_status;
+ if (IS_ERR_OR_NULL(debugfs_create_file("power-state-usage",
+ S_IRUGO, rdebug.dir, &pdev->dev,
+ &ux500_regulator_power_state_use_fops)))
+ goto exit_fail;
+
rdebug.regulator_array = regulator_info;
rdebug.num_regulators = num_regulators;
@@ -204,27 +332,22 @@ ux500_regulator_debug_init(struct platform_device *pdev,
if (!rdebug.state_before_suspend) {
dev_err(&pdev->dev,
"could not allocate memory for saving state\n");
- goto exit_destroy_power_state;
+ goto exit_fail;
}
rdebug.state_after_suspend = kzalloc(num_regulators, GFP_KERNEL);
if (!rdebug.state_after_suspend) {
dev_err(&pdev->dev,
"could not allocate memory for saving state\n");
- goto exit_free;
+ goto exit_fail;
}
dbx500_regulator_testcase(regulator_info, num_regulators);
return 0;
-exit_free:
+exit_fail:
kfree(rdebug.state_before_suspend);
-exit_destroy_power_state:
- debugfs_remove(rdebug.power_state_cnt_file);
-exit_destroy_status:
- debugfs_remove(rdebug.status_file);
-exit_destroy_dir:
- debugfs_remove(rdebug.dir);
+ debugfs_remove_recursive(rdebug.dir);
exit_no_debugfs:
dev_err(&pdev->dev, "failed to create debugfs entries.\n");
return -ENOMEM;
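The reworked error path above folds all failure branches into a single exit_fail label, relying on debugfs_remove_recursive() to tear down the directory and every file created inside it, so the individual file dentries no longer need to be stored. A minimal sketch of the same pattern, with illustrative names (example_debug_init, some_fops):

	static int example_debug_init(struct device *dev,
				      const struct file_operations *some_fops)
	{
		struct dentry *dir = debugfs_create_dir("example", NULL);

		if (IS_ERR_OR_NULL(dir))
			return -ENOMEM;

		if (IS_ERR_OR_NULL(debugfs_create_file("status", S_IRUGO,
						       dir, dev, some_fops)))
			goto fail;

		return 0;

	fail:
		/* One call removes the directory and everything in it. */
		debugfs_remove_recursive(dir);
		return -ENOMEM;
	}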
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 8c8377d50c4..3e47885660e 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -704,6 +704,13 @@ config RTC_DRV_PCF50633
If you say yes here you get support for the RTC subsystem of the
NXP PCF50633 used in embedded systems.
+config RTC_DRV_AB
+ tristate "ST-Ericsson AB5500 RTC"
+ depends on AB5500_CORE
+ help
+ Select this to enable the ST-Ericsson AB5500 Mixed Signal IC RTC
+ support. This chip contains a battery- and capacitor-backed RTC.
+
config RTC_DRV_AB3100
tristate "ST-Ericsson AB3100 RTC"
depends on AB3100_CORE
@@ -715,6 +722,7 @@ config RTC_DRV_AB3100
config RTC_DRV_AB8500
tristate "ST-Ericsson AB8500 RTC"
depends on AB8500_CORE
+ select RTC_INTF_DEV_UIE_EMUL
help
Select this to enable the ST-Ericsson AB8500 power management IC RTC
support. This chip contains a battery- and capacitor-backed RTC.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 727ae7786e6..56766bbc519 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -16,6 +16,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
# Keep the list ordered.
obj-$(CONFIG_RTC_DRV_88PM860X) += rtc-88pm860x.o
+obj-$(CONFIG_RTC_DRV_AB) += rtc-ab.o
obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
diff --git a/drivers/rtc/rtc-ab.c b/drivers/rtc/rtc-ab.c
new file mode 100644
index 00000000000..009409f39d7
--- /dev/null
+++ b/drivers/rtc/rtc-ab.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
+
+#define AB5500_RTC_CLOCK_RATE 32768
+#define AB5500_RTC 0x00
+#define AB5500_RTC_ALARM (1 << 1)
+#define AB5500_READREQ 0x01
+#define AB5500_READREQ_REQ 0x01
+#define AB5500_AL0 0x02
+#define AB5500_TI0 0x06
+
+/**
+ * struct ab_rtc - variant specific data
+ * @irqname: optional name for the alarm interrupt resource
+ * @epoch: epoch to adjust year to
+ * @bank: AB bank where this block is present
+ * @rtc: address of the "RTC" (control) register
+ * @rtc_alarmon: mask of the alarm enable bit in the above register
+ * @ti0: address of the TI0 register. The rest of the TI
+ * registers are assumed to contiguously follow this one.
+ * @nr_ti: number of TI* registers
+ * @al0: address of the AL0 register. The rest of the
+ * AL registers are assumed to contiguously follow this one.
+ * @nr_al: number of AL* registers
+ * @startup: optional function to initialize the RTC
+ * @alarm_to_regs: function to convert alarm time in seconds
+ * to a list of AL register values
+ * @time_to_regs: function to convert alarm time in seconds
+ * to a list of TI register values
+ * @regs_to_alarm: function to convert a list of AL register
+ * values to the alarm time in seconds
+ * @regs_to_time: function to convert a list of TI register
+ * values to the alarm time in seconds
+ * @request_read: optional function to request a read from the TI* registers
+ * @request_write: optional function to request a write to the TI* registers
+ */
+struct ab_rtc {
+ const char *irqname;
+ unsigned int epoch;
+
+ u8 bank;
+ u8 rtc;
+ u8 rtc_alarmon;
+ u8 ti0;
+ int nr_ti;
+ u8 al0;
+ int nr_al;
+
+ int (*startup)(struct device *dev);
+ void (*alarm_to_regs)(struct device *dev, unsigned long secs, u8 *regs);
+ void (*time_to_regs)(struct device *dev, unsigned long secs, u8 *regs);
+ unsigned long (*regs_to_alarm)(struct device *dev, u8 *regs);
+ unsigned long (*regs_to_time)(struct device *dev, u8 *regs);
+ int (*request_read)(struct device *dev);
+ int (*request_write)(struct device *dev);
+};
+
+static const struct ab_rtc *to_ab_rtc(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ return (struct ab_rtc *)pdev->id_entry->driver_data;
+}
+
+/*
+ * Calculate the number of seconds elapsed from 1970 to the start of the
+ * given year, for epoch adjustment.
+ */
+static unsigned long ab_rtc_get_elapsed_seconds(unsigned int year)
+{
+ unsigned long secs;
+ struct rtc_time tm = {
+ .tm_year = year - 1900,
+ .tm_mday = 1,
+ };
+
+ rtc_tm_to_time(&tm, &secs);
+
+ return secs;
+}
+
+static int ab5500_rtc_request_read(struct device *dev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned long timeout;
+ int err;
+
+ err = abx500_set_register_interruptible(dev, variant->bank,
+ AB5500_READREQ,
+ AB5500_READREQ_REQ);
+ if (err < 0)
+ return err;
+
+ timeout = jiffies + HZ;
+ while (time_before(jiffies, timeout)) {
+ u8 value;
+
+ err = abx500_get_register_interruptible(dev, variant->bank,
+ AB5500_READREQ, &value);
+ if (err < 0)
+ return err;
+
+ if (!(value & AB5500_READREQ_REQ))
+ return 0;
+
+ msleep(1);
+ }
+
+ return -EIO;
+}
+
+static void
+ab5500_rtc_time_to_regs(struct device *dev, unsigned long secs, u8 *regs)
+{
+ unsigned long mins = secs / 60;
+ u64 fat_time;
+
+ secs %= 60;
+
+ fat_time = secs * AB5500_RTC_CLOCK_RATE;
+ fat_time |= (u64)mins << 21;
+
+ regs[0] = (fat_time) & 0xFF;
+ regs[1] = (fat_time >> 8) & 0xFF;
+ regs[2] = (fat_time >> 16) & 0xFF;
+ regs[3] = (fat_time >> 24) & 0xFF;
+ regs[4] = (fat_time >> 32) & 0xFF;
+ regs[5] = (fat_time >> 40) & 0xFF;
+}
+
+static unsigned long
+ab5500_rtc_regs_to_time(struct device *dev, u8 *regs)
+{
+ u64 fat_time = ((u64)regs[5] << 40) | ((u64)regs[4] << 32) |
+ ((u64)regs[3] << 24) | ((u64)regs[2] << 16) |
+ ((u64)regs[1] << 8) | regs[0];
+ unsigned long secs = (fat_time & 0x1fffff) / AB5500_RTC_CLOCK_RATE;
+ unsigned long mins = fat_time >> 21;
+
+ return mins * 60 + secs;
+}
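+
+/*
+ * Worked example (illustrative): for a time of 61 s, time_to_regs()
+ * computes mins = 1 with 1 s remaining, so
+ * fat_time = (1 << 21) | (1 * 32768) = 0x208000, giving regs[0] = 0x00,
+ * regs[1] = 0x80, regs[2] = 0x20 and regs[3..5] = 0x00. regs_to_time()
+ * then recovers 1 * 60 + 1 = 61 s.
+ */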
+
+static void
+ab5500_rtc_alarm_to_regs(struct device *dev, unsigned long secs, u8 *regs)
+{
+ unsigned long mins = secs / 60;
+
+#ifdef CONFIG_ANDROID
+ /*
+	 * Needed because Android assumes all hardware has a wake-up
+	 * resolution in seconds.
+ */
+ mins++;
+#endif
+
+ regs[0] = mins & 0xFF;
+ regs[1] = (mins >> 8) & 0xFF;
+ regs[2] = (mins >> 16) & 0xFF;
+}
+
+static unsigned long
+ab5500_rtc_regs_to_alarm(struct device *dev, u8 *regs)
+{
+ unsigned long mins = ((unsigned long)regs[2] << 16) |
+ ((unsigned long)regs[1] << 8) |
+ regs[0];
+ unsigned long secs = mins * 60;
+
+ return secs;
+}
+
+static const struct ab_rtc ab5500_rtc = {
+ .irqname = "RTC_Alarm",
+ .bank = AB5500_BANK_RTC,
+ .rtc = AB5500_RTC,
+ .rtc_alarmon = AB5500_RTC_ALARM,
+ .ti0 = AB5500_TI0,
+ .nr_ti = 6,
+ .al0 = AB5500_AL0,
+ .nr_al = 3,
+ .epoch = 2000,
+ .time_to_regs = ab5500_rtc_time_to_regs,
+ .regs_to_time = ab5500_rtc_regs_to_time,
+ .alarm_to_regs = ab5500_rtc_alarm_to_regs,
+ .regs_to_alarm = ab5500_rtc_regs_to_alarm,
+ .request_read = ab5500_rtc_request_read,
+};
+
+static int ab_rtc_request_read(struct device *dev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (!variant->request_read)
+ return 0;
+
+ return variant->request_read(dev);
+}
+
+static int ab_rtc_request_write(struct device *dev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (!variant->request_write)
+ return 0;
+
+ return variant->request_write(dev);
+}
+
+static bool ab_rtc_valid_time(struct device *dev, struct rtc_time *time)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (!variant->epoch)
+ return true;
+
+ return time->tm_year >= variant->epoch - 1900;
+}
+
+static int
+ab_rtc_tm_to_time(struct device *dev, struct rtc_time *tm, unsigned long *secs)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ rtc_tm_to_time(tm, secs);
+
+ if (variant->epoch)
+ *secs -= ab_rtc_get_elapsed_seconds(variant->epoch);
+
+ return 0;
+}
+
+static int
+ab_rtc_time_to_tm(struct device *dev, unsigned long secs, struct rtc_time *tm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+
+ if (variant->epoch)
+ secs += ab_rtc_get_elapsed_seconds(variant->epoch);
+
+ rtc_time_to_tm(secs, tm);
+
+ return 0;
+}
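+
+/*
+ * Illustrative round trip: with epoch == 2000, a wall-clock time in 2011
+ * is stored as seconds since 2000-01-01 (the subtraction in
+ * ab_rtc_tm_to_time() above), and ab_rtc_time_to_tm() adds
+ * ab_rtc_get_elapsed_seconds(2000) back to restore the 1970-based time.
+ */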
+
+static int ab_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned char buf[variant->nr_ti];
+ unsigned long secs;
+ int err;
+
+ err = ab_rtc_request_read(dev);
+ if (err)
+ return err;
+
+ err = abx500_get_register_page_interruptible(dev, variant->bank,
+ variant->ti0,
+ buf, variant->nr_ti);
+ if (err)
+ return err;
+
+ secs = variant->regs_to_time(dev, buf);
+ ab_rtc_time_to_tm(dev, secs, tm);
+
+ return rtc_valid_tm(tm);
+}
+
+static int ab_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned char buf[variant->nr_ti];
+ unsigned long secs;
+ u8 reg = variant->ti0;
+ int err;
+ int i;
+
+ if (!ab_rtc_valid_time(dev, tm))
+ return -EINVAL;
+
+ ab_rtc_tm_to_time(dev, tm, &secs);
+ variant->time_to_regs(dev, secs, buf);
+
+ for (i = 0; i < variant->nr_ti; i++, reg++) {
+ err = abx500_set_register_interruptible(dev, variant->bank,
+ reg, buf[i]);
+ if (err)
+ return err;
+ }
+
+ return ab_rtc_request_write(dev);
+}
+
+static int ab_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned long secs;
+ u8 buf[variant->nr_al];
+ u8 rtcval;
+ int err;
+
+ err = abx500_get_register_interruptible(dev, variant->bank,
+ variant->rtc, &rtcval);
+ if (err)
+ return err;
+
+ alarm->enabled = !!(rtcval & variant->rtc_alarmon);
+ alarm->pending = 0;
+
+ err = abx500_get_register_page_interruptible(dev, variant->bank,
+ variant->al0, buf,
+ variant->nr_al);
+ if (err)
+ return err;
+
+ secs = variant->regs_to_alarm(dev, buf);
+ ab_rtc_time_to_tm(dev, secs, &alarm->time);
+
+ return rtc_valid_tm(&alarm->time);
+}
+
+static int ab_rtc_alarm_enable(struct device *dev, unsigned int enabled)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ u8 mask = variant->rtc_alarmon;
+ u8 value = enabled ? mask : 0;
+
+ return abx500_mask_and_set_register_interruptible(dev, variant->bank,
+ variant->rtc, mask,
+ value);
+}
+
+static int ab_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ const struct ab_rtc *variant = to_ab_rtc(dev);
+ unsigned char buf[variant->nr_al];
+ unsigned long secs;
+ u8 reg = variant->al0;
+ int err;
+ int i;
+
+ if (!ab_rtc_valid_time(dev, &alarm->time))
+ return -EINVAL;
+
+ ab_rtc_tm_to_time(dev, &alarm->time, &secs);
+ variant->alarm_to_regs(dev, secs, buf);
+
+ /*
+ * Disable alarm first. Otherwise the RTC may not detect an alarm
+ * reprogrammed for the same time without disabling the alarm in
+ * between the programmings.
+ */
+ err = ab_rtc_alarm_enable(dev, false);
+ if (err)
+ return err;
+
+ for (i = 0; i < variant->nr_al; i++, reg++) {
+ err = abx500_set_register_interruptible(dev, variant->bank,
+ reg, buf[i]);
+ if (err)
+ return err;
+ }
+
+ return alarm->enabled ? ab_rtc_alarm_enable(dev, true) : 0;
+}
+
+static const struct rtc_class_ops ab_rtc_ops = {
+ .read_time = ab_rtc_read_time,
+ .set_time = ab_rtc_set_time,
+ .read_alarm = ab_rtc_read_alarm,
+ .set_alarm = ab_rtc_set_alarm,
+ .alarm_irq_enable = ab_rtc_alarm_enable,
+};
+
+static irqreturn_t ab_rtc_irq(int irq, void *dev_id)
+{
+ unsigned long events = RTC_IRQF | RTC_AF;
+ struct rtc_device *rtc = dev_id;
+
+ rtc_update_irq(rtc, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit ab_rtc_probe(struct platform_device *pdev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(&pdev->dev);
+ int err;
+ struct rtc_device *rtc;
+ int irq = -ENXIO;
+
+ if (variant->irqname) {
+ irq = platform_get_irq_byname(pdev, variant->irqname);
+ if (irq < 0)
+ return irq;
+ }
+
+ if (variant->startup) {
+ err = variant->startup(&pdev->dev);
+ if (err)
+ return err;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+
+ rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ dev_err(&pdev->dev, "Registration failed\n");
+ err = PTR_ERR(rtc);
+ return err;
+ }
+
+ if (irq >= 0) {
+ err = request_any_context_irq(irq, ab_rtc_irq,
+ IRQF_NO_SUSPEND,
+ pdev->id_entry->name,
+ rtc);
+ if (err < 0) {
+ dev_err(&pdev->dev, "could not get irq: %d\n", err);
+ goto out_unregister;
+ }
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ return 0;
+
+out_unregister:
+ rtc_device_unregister(rtc);
+ return err;
+}
+
+static int __devexit ab_rtc_remove(struct platform_device *pdev)
+{
+ const struct ab_rtc *variant = to_ab_rtc(&pdev->dev);
+ struct rtc_device *rtc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq_byname(pdev, variant->irqname);
+
+ if (irq >= 0)
+ free_irq(irq, rtc);
+ rtc_device_unregister(rtc);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_device_id ab_rtc_id_table[] = {
+ { "ab5500-rtc", (kernel_ulong_t)&ab5500_rtc, },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, ab_rtc_id_table);
+
+static struct platform_driver ab_rtc_driver = {
+ .driver.name = "ab-rtc",
+ .driver.owner = THIS_MODULE,
+ .id_table = ab_rtc_id_table,
+ .probe = ab_rtc_probe,
+ .remove = __devexit_p(ab_rtc_remove),
+};
+
+static int __init ab_rtc_init(void)
+{
+ return platform_driver_register(&ab_rtc_driver);
+}
+module_init(ab_rtc_init);
+
+static void __exit ab_rtc_exit(void)
+{
+ platform_driver_unregister(&ab_rtc_driver);
+}
+module_exit(ab_rtc_exit);
+
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
+MODULE_DESCRIPTION("AB5500 RTC Driver");
+MODULE_LICENSE("GPL v2");
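Since all chip-specific behaviour is captured in struct ab_rtc, supporting a further variant would only require a new descriptor and an id_table entry; the core functions stay untouched. A hypothetical sketch (the "ab9999" name and its register layout are invented for illustration):

	static const struct ab_rtc ab9999_rtc = {
		.bank		= 0x01,		/* assumed bank */
		.rtc		= 0x00,
		.rtc_alarmon	= 1 << 1,
		.ti0		= 0x06,
		.nr_ti		= 6,
		.al0		= 0x02,
		.nr_al		= 3,
		.epoch		= 2000,
		.time_to_regs	= ab5500_rtc_time_to_regs,
		.regs_to_time	= ab5500_rtc_regs_to_time,
		.alarm_to_regs	= ab5500_rtc_alarm_to_regs,
		.regs_to_alarm	= ab5500_rtc_regs_to_alarm,
	};

	static struct platform_device_id ab_rtc_id_table[] = {
		{ "ab5500-rtc", (kernel_ulong_t)&ab5500_rtc, },
		{ "ab9999-rtc", (kernel_ulong_t)&ab9999_rtc, },
		{ },
	};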
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 4bcf9ca2818..63b3a672a17 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -88,22 +88,17 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (retval < 0)
return retval;
- /* Early AB8500 chips will not clear the rtc read request bit */
- if (abx500_get_chip_id(dev) == 0) {
- usleep_range(1000, 1000);
- } else {
- /* Wait for some cycles after enabling the rtc read in ab8500 */
- while (time_before(jiffies, timeout)) {
- retval = abx500_get_register_interruptible(dev,
- AB8500_RTC, AB8500_RTC_READ_REQ_REG, &value);
- if (retval < 0)
- return retval;
-
- if (!(value & RTC_READ_REQUEST))
- break;
-
- usleep_range(1000, 5000);
- }
+ /* Wait for some cycles after enabling the rtc read in ab8500 */
+ while (time_before(jiffies, timeout)) {
+ retval = abx500_get_register_interruptible(dev,
+ AB8500_RTC, AB8500_RTC_READ_REQ_REG, &value);
+ if (retval < 0)
+ return retval;
+
+ if (!(value & RTC_READ_REQUEST))
+ break;
+
+ usleep_range(1000, 5000);
}
/* Read the Watchtime registers */
@@ -224,8 +219,8 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
int retval, i;
unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
- unsigned long mins, secs = 0;
-
+	unsigned long mins, secs = 0, cursec = 0;
+	struct rtc_time curtm;
+
if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) {
dev_dbg(dev, "year should be equal to or greater than %d\n",
AB8500_RTC_EPOCH);
@@ -235,14 +230,36 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
/* Get the number of seconds since 1970 */
rtc_tm_to_time(&alarm->time, &secs);
+	/*
+	 * Check whether the alarm is set less than one minute from now.
+	 * Since this RTC does not support an alarm resolution below one
+	 * minute, return -EINVAL so UIE emulation can take over in case of
+	 * UIE_ON.
+	 */
+ ab8500_rtc_read_time(dev, &curtm); /* Read current time */
+ rtc_tm_to_time(&curtm, &cursec);
+ if ((secs - cursec) < 59) {
+ dev_dbg(dev, "Alarm less than 1 minute not supported\n");
+ return -EINVAL;
+ }
+
/*
* Convert it to the number of seconds since 01-01-2000 00:00:00, since
* we only have a small counter in the RTC.
*/
secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
+#ifndef CONFIG_ANDROID
+ secs += 30; /* Round to nearest minute */
+#endif
+
mins = secs / 60;
+#ifdef CONFIG_ANDROID
+ /*
+	 * Needed because Android assumes all hardware has a wake-up
+	 * resolution in seconds.
+ */
+ mins++;
+#endif
buf[2] = mins & 0xFF;
buf[1] = (mins >> 8) & 0xFF;
buf[0] = (mins >> 16) & 0xFF;
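To make the added rounding concrete: for an alarm 90 s in the future, the non-Android path computes (90 + 30) / 60 = 2 minutes (round to nearest minute), while the Android path computes 90 / 60 = 1 and then increments to 2 (always round up, so the wake-up never fires early). For 20 s the two paths give 0 and 1 minutes respectively. (Values are illustrative.)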
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index f027c063fb2..cc0533994f6 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -220,17 +220,9 @@ static irqreturn_t pl031_interrupt(int irq, void *dev_id)
unsigned long events = 0;
rtcmis = readl(ldata->base + RTC_MIS);
- if (rtcmis) {
- writel(rtcmis, ldata->base + RTC_ICR);
-
- if (rtcmis & RTC_BIT_AI)
- events |= (RTC_AF | RTC_IRQF);
-
- /* Timer interrupt is only available in ST variants */
- if ((rtcmis & RTC_BIT_PI) &&
- (ldata->hw_designer == AMBA_VENDOR_ST))
- events |= (RTC_PF | RTC_IRQF);
-
+ if (rtcmis & RTC_BIT_AI) {
+ writel(RTC_BIT_AI, ldata->base + RTC_ICR);
+ events |= (RTC_AF | RTC_IRQF);
rtc_update_irq(ldata->rtc, 1, events);
return IRQ_HANDLED;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 00c024039c9..b8aa82fa2a6 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -113,6 +113,20 @@ config SPI_BITBANG
need it. You only need to select this explicitly to support driver
modules that aren't part of this kernel tree.
+config STM_MSP_SPI
+ tristate "STM MSP CONTROLLER (SPI master)"
+ default y
+ help
+ This enables using the STM MSP controller in master
+ mode.
+
+config SPI_WORKQUEUE
+ bool "SPI_WORKQUEUE"
+ depends on STM_MSP_SPI
+ default n
+ help
+	  This feature allows SPI work to be deferred to a dedicated
+	  workqueue in the MSP driver.
+
config SPI_BUTTERFLY
tristate "Parallel port adapter for AVR Butterfly (DEVELOPMENT)"
depends on PARPORT
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 9d75d2198ff..6c10ed18023 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -62,4 +62,5 @@ obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
+obj-$(CONFIG_STM_MSP_SPI) += stm_msp.o
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 400ae2121a2..8bf53a76a33 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -489,6 +489,13 @@ static void giveback(struct pl022 *pl022)
pl022->cur_transfer = NULL;
pl022->cur_chip = NULL;
spi_finalize_current_message(pl022->master);
+
+ /* disable the SPI/SSP operation */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+ /* This message is completed, so let's turn off the clocks & power */
+ pm_runtime_put(&pl022->adev->dev);
}
/**
@@ -895,6 +902,12 @@ static int configure_dma(struct pl022 *pl022)
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
+ /* DMA burstsize should be same as the FIFO trigger level */
+ rx_conf.src_maxburst = pl022->rx_lev_trig ? 1 <<
+ (pl022->rx_lev_trig + 1) : pl022->rx_lev_trig;
+ tx_conf.dst_maxburst = pl022->tx_lev_trig ? 1 <<
+ (pl022->tx_lev_trig + 1) : pl022->tx_lev_trig;
+
/* Check that the channels are available */
if (!rxchan || !txchan)
return -ENODEV;
@@ -2048,6 +2061,9 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
adev->res.start, pl022->virtbase);
+ pm_runtime_enable(dev);
+ pm_runtime_resume(dev);
+
pl022->clk = clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
status = PTR_ERR(pl022->clk);
@@ -2158,6 +2174,7 @@ pl022_remove(struct amba_device *adev)
clk_disable(pl022->clk);
clk_unprepare(pl022->clk);
clk_put(pl022->clk);
+ pm_runtime_disable(&adev->dev);
iounmap(pl022->virtbase);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
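The maxburst expressions added to configure_dma() map the enumerated FIFO trigger level to a power-of-two burst size: a level value n > 0 yields 1 << (n + 1), i.e. 1 -> 4, 2 -> 8, 3 -> 16 and 4 -> 32 words, while 0 is passed through unchanged. This keeps the DMA burst size aligned with the FIFO trigger level, as the comment requires. (Mapping shown for illustration; the encoding is the driver's existing rx/tx_lev_trig.)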
diff --git a/drivers/spi/stm_msp.c b/drivers/spi/stm_msp.c
new file mode 100644
index 00000000000..65dc316b37b
--- /dev/null
+++ b/drivers/spi/stm_msp.c
@@ -0,0 +1,1929 @@
+/*
+ * drivers/spi/stm_msp.c
+ *
+ * Copyright (C) 2010 STMicroelectronics Pvt. Ltd.
+ *
+ * Author: Sachin Verma <sachin.verma@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/amba/bus.h>
+#include <linux/spi/stm_msp.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+/**
+ * MSP Controller Register Offsets
+ */
+#define MSP_DR(r) (r + 0x000)
+#define MSP_GCR(r) (r + 0x004)
+#define MSP_TCF(r) (r + 0x008)
+#define MSP_RCF(r) (r + 0x00C)
+#define MSP_SRG(r) (r + 0x010)
+#define MSP_FLR(r) (r + 0x014)
+#define MSP_DMACR(r) (r + 0x018)
+#define MSP_IMSC(r) (r + 0x020)
+#define MSP_RIS(r) (r + 0x024)
+#define MSP_MIS(r) (r + 0x028)
+#define MSP_ICR(r) (r + 0x02C)
+#define MSP_MCR(r) (r + 0x030)
+#define MSP_RCV(r) (r + 0x034)
+#define MSP_RCM(r) (r + 0x038)
+#define MSP_TCE0(r) (r + 0x040)
+#define MSP_TCE1(r) (r + 0x044)
+#define MSP_TCE2(r) (r + 0x048)
+#define MSP_TCE3(r) (r + 0x04C)
+#define MSP_RCE0(r) (r + 0x060)
+#define MSP_RCE1(r) (r + 0x064)
+#define MSP_RCE2(r) (r + 0x068)
+#define MSP_RCE3(r) (r + 0x06C)
+#define MSP_PID0(r) (r + 0xFE0)
+#define MSP_PID1(r) (r + 0xFE4)
+#define MSP_PID2(r) (r + 0xFE8)
+#define MSP_PID3(r) (r + 0xFEC)
+
+/**
+ * MSP Global Configuration Register - MSP_GCR
+ */
+#define MSP_GCR_MASK_RXEN ((u32)(0x1UL << 0))
+#define MSP_GCR_MASK_RFFEN ((u32)(0x1UL << 1))
+#define MSP_GCR_MASK_RFSPOL ((u32)(0x1UL << 2))
+#define MSP_GCR_MASK_DCM ((u32)(0x1UL << 3))
+#define MSP_GCR_MASK_RFSSEL ((u32)(0x1UL << 4))
+#define MSP_GCR_MASK_RCKPOL ((u32)(0x1UL << 5))
+#define MSP_GCR_MASK_RCKSEL ((u32)(0x1UL << 6))
+#define MSP_GCR_MASK_LBM ((u32)(0x1UL << 7))
+#define MSP_GCR_MASK_TXEN ((u32)(0x1UL << 8))
+#define MSP_GCR_MASK_TFFEN ((u32)(0x1UL << 9))
+#define MSP_GCR_MASK_TFSPOL ((u32)(0x1UL << 10))
+#define MSP_GCR_MASK_TFSSEL ((u32)(0x3UL << 11))
+#define MSP_GCR_MASK_TCKPOL ((u32)(0x1UL << 13))
+#define MSP_GCR_MASK_TCKSEL ((u32)(0x1UL << 14))
+#define MSP_GCR_MASK_TXDDL ((u32)(0x1UL << 15))
+#define MSP_GCR_MASK_SGEN ((u32)(0x1UL << 16))
+#define MSP_GCR_MASK_SCKPOL ((u32)(0x1UL << 17))
+#define MSP_GCR_MASK_SCKSEL ((u32)(0x3UL << 18))
+#define MSP_GCR_MASK_FGEN ((u32)(0x1UL << 20))
+#define MSP_GCR_MASK_SPICKM ((u32)(0x3UL << 21))
+#define MSP_GCR_MASK_SPIBME ((u32)(0x1UL << 23))
+
+/**
+ * MSP Transmit Configuration Register - MSP_TCF
+ */
+#define MSP_TCF_MASK_TP1ELEN ((u32)(0x7UL << 0))
+#define MSP_TCF_MASK_TP1FLEN ((u32)(0x7FUL << 3))
+#define MSP_TCF_MASK_TDTYP ((u32)(0x3UL << 10))
+#define MSP_TCF_MASK_TENDN ((u32)(0x1UL << 12))
+#define MSP_TCF_MASK_TDDLY ((u32)(0x3UL << 13))
+#define MSP_TCF_MASK_TFSIG ((u32)(0x1UL << 15))
+#define MSP_TCF_MASK_TP2ELEN ((u32)(0x7UL << 16))
+#define MSP_TCF_MASK_TP2FLEN ((u32)(0x7FUL << 19))
+#define MSP_TCF_MASK_TP2SM ((u32)(0x1UL << 26))
+#define MSP_TCF_MASK_TP2EN ((u32)(0x1UL << 27))
+#define MSP_TCF_MASK_TBSWAP ((u32)(0x3UL << 28))
+
+/**
+ * MSP Receive Configuration Register - MSP_RCF
+ */
+#define MSP_RCF_MASK_RP1ELEN ((u32)(0x7UL << 0))
+#define MSP_RCF_MASK_RP1FLEN ((u32)(0x7FUL << 3))
+#define MSP_RCF_MASK_RDTYP ((u32)(0x3UL << 10))
+#define MSP_RCF_MASK_RENDN ((u32)(0x1UL << 12))
+#define MSP_RCF_MASK_RDDLY ((u32)(0x3UL << 13))
+#define MSP_RCF_MASK_RFSIG ((u32)(0x1UL << 15))
+#define MSP_RCF_MASK_RP2ELEN ((u32)(0x7UL << 16))
+#define MSP_RCF_MASK_RP2FLEN ((u32)(0x7FUL << 19))
+#define MSP_RCF_MASK_RP2SM ((u32)(0x1UL << 26))
+#define MSP_RCF_MASK_RP2EN ((u32)(0x1UL << 27))
+#define MSP_RCF_MASK_RBSWAP ((u32)(0x3UL << 28))
+
+/**
+ * MSP Sample Rate Generator Register - MSP_SRG
+ */
+#define MSP_SRG_MASK_SCKDIV ((u32)(0x3FFUL << 0))
+#define MSP_SRG_MASK_FRWID ((u32)(0x3FUL << 10))
+#define MSP_SRG_MASK_FRPER ((u32)(0x1FFFUL << 16))
+
+/**
+ * MSP Flag Register - MSP_FLR
+ */
+#define MSP_FLR_MASK_RBUSY ((u32)(0x1UL << 0))
+#define MSP_FLR_MASK_RFE ((u32)(0x1UL << 1))
+#define MSP_FLR_MASK_RFU ((u32)(0x1UL << 2))
+#define MSP_FLR_MASK_TBUSY ((u32)(0x1UL << 3))
+#define MSP_FLR_MASK_TFE ((u32)(0x1UL << 4))
+#define MSP_FLR_MASK_TFU ((u32)(0x1UL << 5))
+
+/**
+ * MSP DMA Control Register - MSP_DMACR
+ */
+#define MSP_DMACR_MASK_RDMAE ((u32)(0x1UL << 0))
+#define MSP_DMACR_MASK_TDMAE ((u32)(0x1UL << 1))
+
+/**
+ * MSP Interrupt Mask Set/Clear Register - MSP_IMSC
+ */
+#define MSP_IMSC_MASK_RXIM ((u32)(0x1UL << 0))
+#define MSP_IMSC_MASK_ROEIM ((u32)(0x1UL << 1))
+#define MSP_IMSC_MASK_RSEIM ((u32)(0x1UL << 2))
+#define MSP_IMSC_MASK_RFSIM ((u32)(0x1UL << 3))
+#define MSP_IMSC_MASK_TXIM ((u32)(0x1UL << 4))
+#define MSP_IMSC_MASK_TUEIM ((u32)(0x1UL << 5))
+#define MSP_IMSC_MASK_TSEIM ((u32)(0x1UL << 6))
+#define MSP_IMSC_MASK_TFSIM ((u32)(0x1UL << 7))
+#define MSP_IMSC_MASK_RFOIM ((u32)(0x1UL << 8))
+#define MSP_IMSC_MASK_TFOIM ((u32)(0x1UL << 9))
+
+/**
+ * MSP Raw Interrupt status Register - MSP_RIS
+ */
+#define MSP_RIS_MASK_RXRIS ((u32)(0x1UL << 0))
+#define MSP_RIS_MASK_ROERIS ((u32)(0x1UL << 1))
+#define MSP_RIS_MASK_RSERIS ((u32)(0x1UL << 2))
+#define MSP_RIS_MASK_RFSRIS ((u32)(0x1UL << 3))
+#define MSP_RIS_MASK_TXRIS ((u32)(0x1UL << 4))
+#define MSP_RIS_MASK_TUERIS ((u32)(0x1UL << 5))
+#define MSP_RIS_MASK_TSERIS ((u32)(0x1UL << 6))
+#define MSP_RIS_MASK_TFSRIS ((u32)(0x1UL << 7))
+#define MSP_RIS_MASK_RFORIS ((u32)(0x1UL << 8))
+#define MSP_RIS_MASK_TFORIS ((u32)(0x1UL << 9))
+
+/**
+ * MSP Masked Interrupt status Register - MSP_MIS
+ */
+#define MSP_MIS_MASK_RXMIS ((u32)(0x1UL << 0))
+#define MSP_MIS_MASK_ROEMIS ((u32)(0x1UL << 1))
+#define MSP_MIS_MASK_RSEMIS ((u32)(0x1UL << 2))
+#define MSP_MIS_MASK_RFSMIS ((u32)(0x1UL << 3))
+#define MSP_MIS_MASK_TXMIS ((u32)(0x1UL << 4))
+#define MSP_MIS_MASK_TUEMIS ((u32)(0x1UL << 5))
+#define MSP_MIS_MASK_TSEMIS ((u32)(0x1UL << 6))
+#define MSP_MIS_MASK_TFSMIS ((u32)(0x1UL << 7))
+#define MSP_MIS_MASK_RFOMIS ((u32)(0x1UL << 8))
+#define MSP_MIS_MASK_TFOMIS ((u32)(0x1UL << 9))
+
+/**
+ * MSP Interrupt Clear Register - MSP_ICR
+ */
+#define MSP_ICR_MASK_ROEIC ((u32)(0x1UL << 1))
+#define MSP_ICR_MASK_RSEIC ((u32)(0x1UL << 2))
+#define MSP_ICR_MASK_RFSIC ((u32)(0x1UL << 3))
+#define MSP_ICR_MASK_TUEIC ((u32)(0x1UL << 5))
+#define MSP_ICR_MASK_TSEIC ((u32)(0x1UL << 6))
+#define MSP_ICR_MASK_TFSIC ((u32)(0x1UL << 7))
+
+#define GEN_MASK_BITS(val, mask, sb) ((u32)((((u32)val) << (sb)) & (mask)))
+#define MSP_WBITS(reg, val, mask, sb) ((reg) = (((reg) & ~(mask)) |\
+ (((val) << (sb)) & (mask))))
+#define DEFAULT_MSP_REG_DMACR 0x00000000
+#define DEFAULT_MSP_REG_SRG 0x1FFF0000
+
+#define DEFAULT_MSP_REG_GCR ( \
+ GEN_MASK_BITS(MSP_RECEIVER_DISABLED, MSP_GCR_MASK_RXEN, 0) |\
+ GEN_MASK_BITS(MSP_RX_FIFO_ENABLED, MSP_GCR_MASK_RFFEN, 1) |\
+ GEN_MASK_BITS(MSP_LOOPBACK_DISABLED, MSP_GCR_MASK_LBM, 7) |\
+ GEN_MASK_BITS(MSP_TRANSMITTER_DISABLED, MSP_GCR_MASK_TXEN, 8) |\
+ GEN_MASK_BITS(MSP_TX_FIFO_ENABLED, MSP_GCR_MASK_TFFEN, 9) |\
+ GEN_MASK_BITS(MSP_TX_FRAME_SYNC_POL_LOW, MSP_GCR_MASK_TFSPOL, 10)|\
+ GEN_MASK_BITS(MSP_TX_FRAME_SYNC_INT, MSP_GCR_MASK_TFSSEL, 11) |\
+ GEN_MASK_BITS(MSP_TX_CLOCK_POL_HIGH, MSP_GCR_MASK_TCKPOL, 13) |\
+ GEN_MASK_BITS(MSP_IS_SPI_MASTER, MSP_GCR_MASK_TCKSEL, 14) |\
+ GEN_MASK_BITS(MSP_TRANSMIT_DATA_WITHOUT_DELAY, MSP_GCR_MASK_TXDDL, 15)|\
+ GEN_MASK_BITS(MSP_SAMPLE_RATE_GEN_ENABLE, MSP_GCR_MASK_SGEN, 16)|\
+ GEN_MASK_BITS(MSP_CLOCK_INTERNAL, MSP_GCR_MASK_SCKSEL, 18) |\
+ GEN_MASK_BITS(MSP_FRAME_GEN_ENABLE, MSP_GCR_MASK_FGEN, 20) |\
+ GEN_MASK_BITS(MSP_SPI_PHASE_ZERO_CYCLE_DELAY, MSP_GCR_MASK_SPICKM, 21)|\
+ GEN_MASK_BITS(SPI_BURST_MODE_DISABLE, MSP_GCR_MASK_SPIBME, 23)\
+ )
+#define DEFAULT_MSP_REG_RCF ( \
+ GEN_MASK_BITS(MSP_DATA_BITS_32, MSP_RCF_MASK_RP1ELEN, 0) | \
+ GEN_MASK_BITS(MSP_IGNORE_RX_FRAME_SYNC_PULSE, MSP_RCF_MASK_RFSIG, 15) |\
+ GEN_MASK_BITS(MSP_RX_1BIT_DATA_DELAY, MSP_RCF_MASK_RDDLY, 13) | \
+ GEN_MASK_BITS(MSP_RX_ENDIANESS_LSB, MSP_RCF_MASK_RENDN, 12) \
+ )
+
+#define DEFAULT_MSP_REG_TCF ( \
+ GEN_MASK_BITS(MSP_DATA_BITS_32, MSP_TCF_MASK_TP1ELEN, 0) | \
+ GEN_MASK_BITS(MSP_IGNORE_TX_FRAME_SYNC_PULSE, MSP_TCF_MASK_TFSIG, 15) |\
+ GEN_MASK_BITS(MSP_TX_1BIT_DATA_DELAY, MSP_TCF_MASK_TDDLY, 13) | \
+ GEN_MASK_BITS(MSP_TX_ENDIANESS_LSB, MSP_TCF_MASK_TENDN, 12) \
+ )
+
+/**
+ * MSP Receiver/Transmitter states (enabled or disabled)
+ */
+#define MSP_RECEIVER_DISABLED 0
+#define MSP_RECEIVER_ENABLED 1
+#define MSP_TRANSMITTER_DISABLED 0
+#define MSP_TRANSMITTER_ENABLED 1
+
+/**
+ * MSP Receiver/Transmitter FIFO constants
+ */
+#define MSP_LOOPBACK_DISABLED 0
+#define MSP_LOOPBACK_ENABLED 1
+
+#define MSP_TX_FIFO_DISABLED 0
+#define MSP_TX_FIFO_ENABLED 1
+#define MSP_TX_ENDIANESS_MSB 0
+#define MSP_TX_ENDIANESS_LSB 1
+
+#define MSP_RX_FIFO_DISABLED 0
+#define MSP_RX_FIFO_ENABLED 1
+#define MSP_RX_ENDIANESS_MSB 0
+#define MSP_RX_ENDIANESS_LSB 1
+
+#define MSP_TX_FRAME_SYNC_EXT 0x0
+#define MSP_TX_FRAME_SYNC_INT 0x2
+#define MSP_TX_FRAME_SYNC_INT_CFG 0x3
+
+#define MSP_TX_FRAME_SYNC_POL_HIGH 0
+#define MSP_TX_FRAME_SYNC_POL_LOW 1
+
+#define MSP_HANDLE_RX_FRAME_SYNC_PULSE 0
+#define MSP_IGNORE_RX_FRAME_SYNC_PULSE 1
+
+#define MSP_RX_NO_DATA_DELAY 0x0
+#define MSP_RX_1BIT_DATA_DELAY 0x1
+#define MSP_RX_2BIT_DATA_DELAY 0x2
+#define MSP_RX_3BIT_DATA_DELAY 0x3
+
+#define MSP_HANDLE_TX_FRAME_SYNC_PULSE 0
+#define MSP_IGNORE_TX_FRAME_SYNC_PULSE 1
+
+#define MSP_TX_NO_DATA_DELAY 0x0
+#define MSP_TX_1BIT_DATA_DELAY 0x1
+#define MSP_TX_2BIT_DATA_DELAY 0x2
+#define MSP_TX_3BIT_DATA_DELAY 0x3
+
+#define MSP_TX_CLOCK_POL_LOW 0
+#define MSP_TX_CLOCK_POL_HIGH 1
+
+#define MSP_SPI_PHASE_ZERO_CYCLE_DELAY 0x2
+#define MSP_SPI_PHASE_HALF_CYCLE_DELAY 0x3
+
+#define MSP_IS_SPI_SLAVE 0
+#define MSP_IS_SPI_MASTER 1
+
+#define MSP_FRAME_GEN_DISABLE 0
+#define MSP_FRAME_GEN_ENABLE 1
+
+#define MSP_SAMPLE_RATE_GEN_DISABLE 0
+#define MSP_SAMPLE_RATE_GEN_ENABLE 1
+
+#define SPI_BURST_MODE_DISABLE 0
+#define SPI_BURST_MODE_ENABLE 1
+
+#define MSP_TRANSMIT_DATA_WITHOUT_DELAY 0
+#define MSP_TRANSMIT_DATA_WITH_DELAY 1
+
+#define MSP_CLOCK_INTERNAL 0x0 /* 48 MHz */
+
+/*
+ * The SRG is derived from the MSPSCK pin but is resynchronized on MSPRFS
+ * (the receive frame sync signal).
+ */
+#define MSP_CLOCK_EXTERNAL 0x2
+#define MSP_CLOCK_EXTERNAL_RESYNC 0x3
+
+#define DISABLE_ALL_MSP_INTERRUPTS (0x0)
+#define ENABLE_ALL_MSP_INTERRUPTS (0x333)
+#define CLEAR_ALL_MSP_INTERRUPTS (0xEE)
+#define DEFAULT_MSP_CLK (48000000)
+#define MAX_SCKDIV (1023)
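+
+/*
+ * Illustrative (assumed) relation: the serial clock is approximately
+ * DEFAULT_MSP_CLK / (SCKDIV + 1), so SCKDIV = 999 would yield
+ * 48 MHz / 1000 = 48 kHz, matching SPI_DEFAULT_MAX_SPEED_HZ below.
+ */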
+
+#define MSP_FIFO_DEPTH 8
+
+/**
+ * Queue State
+ */
+#define QUEUE_RUNNING (0)
+#define QUEUE_STOPPED (1)
+
+#define START_STATE ((void *)0)
+#define RUNNING_STATE ((void *)1)
+#define DONE_STATE ((void *)2)
+#define ERROR_STATE ((void *)-1)
+
+/* Default values */
+#define SPI_DEFAULT_MAX_SPEED_HZ 48000
+#define SPI_TRANSFER_TIMEOUT_MS 5000
+
+/* Controller commands */
+enum cntlr_commands {
+	DISABLE_CONTROLLER = 0,
+	ENABLE_CONTROLLER,
+	DISABLE_ALL_INTERRUPT,
+	ENABLE_ALL_INTERRUPT,
+	FLUSH_FIFO,
+	RESTORE_STATE,
+	LOAD_DEFAULT_CONFIG,
+	CLEAR_ALL_INTERRUPT,
+};
+
+struct stm_msp {
+ struct amba_device *adev;
+ struct spi_master *master;
+ struct stm_msp_controller *master_info;
+ void __iomem *regs;
+ struct clk *clk;
+#ifdef CONFIG_SPI_WORKQUEUE
+ struct workqueue_struct *workqueue;
+#endif
+ struct work_struct spi_work;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ int run;
+ struct tasklet_struct pump_transfers;
+ struct timer_list spi_notify_timer;
+ int spi_io_error;
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ void (*write)(struct stm_msp *stm_msp);
+ void (*read)(struct stm_msp *stm_msp);
+ void (*delay)(struct stm_msp *stm_msp);
+};
+
+/**
+ * struct chip_data - runtime state of the SPI controller for each client chip
+ * @ctr_regs: pointer to a struct holding the controller register values
+ * @chip_id: chip id assigned to this client to identify it
+ * @n_bytes: how many bytes (a power of 2) are required for the client's
+ *	data width
+ * @write: function used to write when doing a transfer for this chip
+ * @null_write: function used for a dummy write when only receiving data
+ * @read: function used to read when doing a transfer for this chip
+ * @null_read: function used for a dummy read while writing data
+ * @cs_control: chip select callback provided by the chip
+ * @xfer_type: polling/interrupt transfer mode
+ *
+ * Runtime state of the SPI controller, maintained per chip. It is set up
+ * according to the current message being served.
+ */
+struct chip_data {
+ void *ctr_regs;
+ u32 chip_id;
+ u8 n_bytes;
+ void (*write) (struct stm_msp *stm_msp);
+ void (*null_write) (struct stm_msp *stm_msp);
+ void (*read) (struct stm_msp *stm_msp);
+ void (*null_read) (struct stm_msp *stm_msp);
+ void (*delay) (struct stm_msp *stm_msp);
+ void (*cs_control) (u32 command);
+ int xfer_type;
+};
+
+/**
+ * struct msp_regs - stores the MSP controller register values used by
+ * the driver
+ * @gcr: global configuration register
+ * @tcf: transmit configuration register
+ * @rcf: receive configuration register
+ * @srg: sample rate generator register
+ * @dmacr: DMA configuration register
+ */
+struct msp_regs {
+ u32 gcr;
+ u32 tcf;
+ u32 rcf;
+ u32 srg;
+ u32 dmacr;
+};
+
+/**
+ * stm_msp_controller_cmd - To execute controller commands for MSP
+ * @stm_msp: SPI driver private data structure
+ * @cmd: Command which is to be executed on the controller
+ */
+static int stm_msp_controller_cmd(struct stm_msp *stm_msp, int cmd)
+{
+ int retval = 0;
+ struct msp_regs *msp_regs = NULL;
+
+ switch (cmd) {
+ case DISABLE_CONTROLLER: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Disabling MSP controller...\n");
+ writel((readl(MSP_GCR(stm_msp->regs)) &
+ (~(MSP_GCR_MASK_TXEN | MSP_GCR_MASK_RXEN))),
+ MSP_GCR(stm_msp->regs));
+ break;
+ }
+ case ENABLE_CONTROLLER: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Enabling MSP controller...\n");
+ writel((readl(MSP_GCR(stm_msp->regs)) |
+ (MSP_GCR_MASK_TXEN | MSP_GCR_MASK_RXEN)),
+ MSP_GCR(stm_msp->regs));
+ break;
+ }
+ case DISABLE_ALL_INTERRUPT: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Disabling all MSP interrupts...\n");
+ writel(DISABLE_ALL_MSP_INTERRUPTS,
+ MSP_IMSC(stm_msp->regs));
+ break;
+ }
+ case ENABLE_ALL_INTERRUPT: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Enabling all MSP interrupts...\n");
+ writel(ENABLE_ALL_MSP_INTERRUPTS,
+ MSP_IMSC(stm_msp->regs));
+ break;
+ }
+ case CLEAR_ALL_INTERRUPT: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Clearing all MSP interrupts...\n");
+ writel(CLEAR_ALL_MSP_INTERRUPTS,
+ MSP_ICR(stm_msp->regs));
+ break;
+ }
+ case FLUSH_FIFO: {
+ unsigned long limit = loops_per_jiffy << 1;
+
+ dev_dbg(&stm_msp->adev->dev, "MSP FIFO flushed\n");
+
+ do {
+ while (!(readl(MSP_FLR(stm_msp->regs)) &
+ MSP_FLR_MASK_RFE)) {
+ readl(MSP_DR(stm_msp->regs));
+ }
+ } while ((readl(MSP_FLR(stm_msp->regs)) &
+ (MSP_FLR_MASK_TBUSY | MSP_FLR_MASK_RBUSY)) &&
+ limit--);
+
+ retval = limit;
+ break;
+ }
+ case RESTORE_STATE: {
+ msp_regs =
+ (struct msp_regs *)stm_msp->cur_chip->ctr_regs;
+
+ dev_dbg(&stm_msp->adev->dev,
+ "Restoring MSP state...\n");
+
+ writel(msp_regs->gcr, MSP_GCR(stm_msp->regs));
+ writel(msp_regs->tcf, MSP_TCF(stm_msp->regs));
+ writel(msp_regs->rcf, MSP_RCF(stm_msp->regs));
+ writel(msp_regs->srg, MSP_SRG(stm_msp->regs));
+ writel(msp_regs->dmacr, MSP_DMACR(stm_msp->regs));
+ writel(DISABLE_ALL_MSP_INTERRUPTS,
+ MSP_IMSC(stm_msp->regs));
+ writel(CLEAR_ALL_MSP_INTERRUPTS,
+ MSP_ICR(stm_msp->regs));
+ break;
+ }
+ case LOAD_DEFAULT_CONFIG: {
+ dev_dbg(&stm_msp->adev->dev,
+ "Loading default MSP config...\n");
+
+ writel(DEFAULT_MSP_REG_GCR, MSP_GCR(stm_msp->regs));
+ writel(DEFAULT_MSP_REG_TCF, MSP_TCF(stm_msp->regs));
+ writel(DEFAULT_MSP_REG_RCF, MSP_RCF(stm_msp->regs));
+ writel(DEFAULT_MSP_REG_SRG, MSP_SRG(stm_msp->regs));
+ writel(DEFAULT_MSP_REG_DMACR, MSP_DMACR(stm_msp->regs));
+ writel(DISABLE_ALL_MSP_INTERRUPTS,
+ MSP_IMSC(stm_msp->regs));
+ writel(CLEAR_ALL_MSP_INTERRUPTS,
+ MSP_ICR(stm_msp->regs));
+ break;
+ }
+ default:
+ dev_dbg(&stm_msp->adev->dev, "Unknown command\n");
+ retval = -1;
+ break;
+ }
+
+ return retval;
+}
+
+/**
+ * giveback - current spi_message is over, schedule next spi_message
+ * @message: current SPI message
+ * @stm_msp: spi driver private data structure
+ *
+ * The current spi_message is complete: schedule the next spi_message and
+ * invoke the completion callback of this message.
+ */
+static void giveback(struct spi_message *message, struct stm_msp *stm_msp)
+{
+ struct spi_transfer *last_transfer;
+ unsigned long flags;
+ struct spi_message *msg;
+ void (*curr_cs_control)(u32 command);
+
+ spin_lock_irqsave(&stm_msp->lock, flags);
+ msg = stm_msp->cur_msg;
+
+ curr_cs_control = stm_msp->cur_chip->cs_control;
+
+ stm_msp->cur_msg = NULL;
+ stm_msp->cur_transfer = NULL;
+ stm_msp->cur_chip = NULL;
+#ifdef CONFIG_SPI_WORKQUEUE
+ queue_work(stm_msp->workqueue, &stm_msp->spi_work);
+#else
+ schedule_work(&stm_msp->spi_work);
+#endif
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+
+ last_transfer = list_entry(msg->transfers.prev,
+ struct spi_transfer, transfer_list);
+
+ if (!last_transfer->cs_change)
+ curr_cs_control(SPI_CHIP_DESELECT);
+
+ msg->state = NULL;
+
+ if (msg->complete)
+ msg->complete(msg->context);
+
+ stm_msp_controller_cmd(stm_msp, DISABLE_CONTROLLER);
+ clk_disable(stm_msp->clk);
+}
+
+/**
+ * spi_notify - handles a polling hang on the SPI bus
+ * @data: main driver data
+ * Context: process
+ *
+ * Used to handle error conditions in the transmit and receive functions
+ * used in polling mode. A polling transfer may hang, for instance when a
+ * wrong protocol descriptor is passed; this timer guards against that.
+ */
+static void spi_notify(unsigned long data)
+{
+ struct stm_msp *stm_msp = (struct stm_msp *)data;
+ stm_msp->spi_io_error = 1;
+
+ dev_err(&stm_msp->adev->dev,
+		"Polling is taking too long, the device may not be responding\n");
+
+ del_timer(&stm_msp->spi_notify_timer);
+}
+
+/**
+ * stm_msp_transfer - transfer function registered to SPI master framework
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message which is to handled is queued to driver queue
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. It will queue the spi_message in the queue of driver if
+ * the queue is not stopped and return.
+ */
+static int stm_msp_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct stm_msp *stm_msp = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&stm_msp->lock, flags);
+
+ if (stm_msp->run == QUEUE_STOPPED) {
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return -ESHUTDOWN;
+ }
+	dev_dbg(&spi->dev, "Regular request (no infinite DMA ongoing)\n");
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = START_STATE;
+
+ list_add_tail(&msg->queue, &stm_msp->queue);
+
+ if ((stm_msp->run == QUEUE_RUNNING) && (!stm_msp->busy))
+#ifdef CONFIG_SPI_WORKQUEUE
+ queue_work(stm_msp->workqueue, &stm_msp->spi_work);
+#else
+ schedule_work(&stm_msp->spi_work);
+#endif
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return 0;
+}
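+
+/*
+ * Illustrative client-side usage (through the SPI core, not this driver
+ * directly): building a message and submitting it eventually lands in
+ * stm_msp_transfer() above.
+ *
+ *	struct spi_transfer t = { .tx_buf = buf, .len = len, };
+ *	struct spi_message m;
+ *
+ *	spi_message_init(&m);
+ *	spi_message_add_tail(&t, &m);
+ *	err = spi_sync(spi, &m);
+ */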
+
+/**
+ * next_transfer - move to the next transfer in the current spi message
+ * @stm_msp: spi driver private data structure
+ *
+ * This function moves through the linked list of spi transfers in the
+ * current spi message and returns the state of the current spi message,
+ * i.e. whether its last transfer is done (DONE_STATE) or the next
+ * transfer is ready (RUNNING_STATE).
+ */
+static void *next_transfer(struct stm_msp *stm_msp)
+{
+ struct spi_message *msg = stm_msp->cur_msg;
+ struct spi_transfer *trans = stm_msp->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ stm_msp->cur_transfer = list_entry(trans->transfer_list.next,
+ struct spi_transfer,
+ transfer_list);
+ return RUNNING_STATE;
+ }
+ return DONE_STATE;
+}
+
+static void do_interrupt_transfer(void *data)
+{
+ struct stm_msp *stm_msp = (struct stm_msp *)data;
+
+ stm_msp->tx = (void *)stm_msp->cur_transfer->tx_buf;
+ stm_msp->tx_end = stm_msp->tx + stm_msp->cur_transfer->len;
+
+ stm_msp->rx = (void *)stm_msp->cur_transfer->rx_buf;
+ stm_msp->rx_end = stm_msp->rx + stm_msp->cur_transfer->len;
+
+ stm_msp->write = stm_msp->tx ?
+ stm_msp->cur_chip->write : stm_msp->cur_chip->null_write;
+ stm_msp->read = stm_msp->rx ?
+ stm_msp->cur_chip->read : stm_msp->cur_chip->null_read;
+
+ stm_msp->cur_chip->cs_control(SPI_CHIP_SELECT);
+
+ stm_msp_controller_cmd(stm_msp, ENABLE_ALL_INTERRUPT);
+ stm_msp_controller_cmd(stm_msp, ENABLE_CONTROLLER);
+}
+
+static void do_polling_transfer(void *data)
+{
+ struct stm_msp *stm_msp = (struct stm_msp *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct chip_data *chip;
+ unsigned long limit = 0;
+ u32 timer_expire = 0;
+
+ chip = stm_msp->cur_chip;
+ message = stm_msp->cur_msg;
+
+ while (message->state != DONE_STATE) {
+ /* Handle for abort */
+ if (message->state == ERROR_STATE)
+ break;
+
+ transfer = stm_msp->cur_transfer;
+
+ /* Delay if requested at end of transfer */
+ if (message->state == RUNNING_STATE) {
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer,
+ transfer_list);
+
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+
+ if (previous->cs_change)
+ stm_msp->cur_chip->cs_control(SPI_CHIP_SELECT);
+ } else {
+ /* START_STATE */
+ message->state = RUNNING_STATE;
+ stm_msp->cur_chip->cs_control(SPI_CHIP_SELECT);
+ }
+
+ /* Configuration Changing Per Transfer */
+ stm_msp->tx = (void *)transfer->tx_buf;
+ stm_msp->tx_end = stm_msp->tx + stm_msp->cur_transfer->len;
+ stm_msp->rx = (void *)transfer->rx_buf;
+ stm_msp->rx_end = stm_msp->rx + stm_msp->cur_transfer->len;
+
+ stm_msp->write = stm_msp->tx ?
+ stm_msp->cur_chip->write :
+ stm_msp->cur_chip->null_write;
+ stm_msp->read = stm_msp->rx ?
+ stm_msp->cur_chip->read :
+ stm_msp->cur_chip->null_read;
+ stm_msp->delay = stm_msp->cur_chip->delay;
+
+ stm_msp_controller_cmd(stm_msp, FLUSH_FIFO);
+ stm_msp_controller_cmd(stm_msp, ENABLE_CONTROLLER);
+
+		/* Scale the timeout with the transfer length (per KiB) */
+		timer_expire = (stm_msp->cur_transfer->len / 1024) *
+			       SPI_TRANSFER_TIMEOUT_MS;
+		if (!timer_expire)
+			timer_expire = SPI_TRANSFER_TIMEOUT_MS;
+
+ stm_msp->spi_notify_timer.expires =
+ jiffies + msecs_to_jiffies(timer_expire);
+
+ add_timer(&stm_msp->spi_notify_timer);
+
+ dev_dbg(&stm_msp->adev->dev, "Polling transfer ongoing...\n");
+
+ while (stm_msp->tx < stm_msp->tx_end) {
+
+ stm_msp_controller_cmd(stm_msp, DISABLE_CONTROLLER);
+ stm_msp->read(stm_msp);
+ stm_msp->write(stm_msp);
+
+ stm_msp_controller_cmd(stm_msp, ENABLE_CONTROLLER);
+
+ if (stm_msp->delay)
+ stm_msp->delay(stm_msp);
+
+ if (stm_msp->spi_io_error == 1)
+ break;
+ }
+
+ del_timer(&stm_msp->spi_notify_timer);
+
+ if (stm_msp->spi_io_error == 1)
+ goto out;
+
+ limit = loops_per_jiffy << 1;
+
+ while ((stm_msp->rx < stm_msp->rx_end) && (limit--))
+ stm_msp->read(stm_msp);
+
+		/* Update total bytes transferred */
+ message->actual_length += stm_msp->cur_transfer->len;
+
+ if (stm_msp->cur_transfer->cs_change)
+ stm_msp->cur_chip->cs_control(SPI_CHIP_DESELECT);
+
+ stm_msp_controller_cmd(stm_msp, DISABLE_CONTROLLER);
+
+ /* Move to next transfer */
+ message->state = next_transfer(stm_msp);
+ }
+out:
+ /* Handle end of message */
+ if (message->state == DONE_STATE)
+ message->status = 0;
+ else
+ message->status = -EIO;
+
+ giveback(message, stm_msp);
+
+ stm_msp->spi_io_error = 0; /* Reset state for further transfers */
+
+ return;
+}
+
+/**
+ * pump_messages - Workqueue function which processes spi message queue
+ * @work: pointer to work
+ *
+ * This function checks if there is any spi message in the queue that
+ * needs processing and delegates control to the appropriate function,
+ * do_polling_transfer() or do_interrupt_transfer(), based on the kind
+ * of transfer.
+ *
+ */
+static void pump_messages(struct work_struct *work)
+{
+ struct stm_msp *stm_msp = container_of(work, struct stm_msp, spi_work);
+ unsigned long flags;
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&stm_msp->lock, flags);
+
+ if (list_empty(&stm_msp->queue) || stm_msp->run == QUEUE_STOPPED) {
+ dev_dbg(&stm_msp->adev->dev, "work_queue: Queue Empty\n");
+ stm_msp->busy = 0;
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return;
+ }
+ /* Make sure we are not already running a message */
+ if (stm_msp->cur_msg) {
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return;
+ }
+
+ clk_enable(stm_msp->clk);
+
+ /* Extract head of queue */
+ stm_msp->cur_msg = list_entry(stm_msp->queue.next,
+ struct spi_message,
+ queue);
+
+ list_del_init(&stm_msp->cur_msg->queue);
+ stm_msp->busy = 1;
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+
+ /* Initial message state */
+ stm_msp->cur_msg->state = START_STATE;
+ stm_msp->cur_transfer = list_entry(stm_msp->cur_msg->transfers.next,
+ struct spi_transfer,
+ transfer_list);
+
+ /* Setup the SPI using the per chip configuration */
+ stm_msp->cur_chip = spi_get_ctldata(stm_msp->cur_msg->spi);
+ stm_msp_controller_cmd(stm_msp, RESTORE_STATE);
+ stm_msp_controller_cmd(stm_msp, FLUSH_FIFO);
+
+ if (stm_msp->cur_chip->xfer_type == SPI_POLLING_TRANSFER)
+ do_polling_transfer(stm_msp);
+ else if (stm_msp->cur_chip->xfer_type == SPI_INTERRUPT_TRANSFER)
+ do_interrupt_transfer(stm_msp);
+}
+
+/**
+ * pump_transfers - Tasklet function which schedules next interrupt xfer
+ * @data: spi driver private data structure
+ */
+static void pump_transfers(unsigned long data)
+{
+ struct stm_msp *stm_msp = (struct stm_msp *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+
+ message = stm_msp->cur_msg;
+
+ /* Handle for abort */
+ if (message->state == ERROR_STATE) {
+ message->status = -EIO;
+ giveback(message, stm_msp);
+ return;
+ }
+
+ /* Handle end of message */
+ if (message->state == DONE_STATE) {
+ message->status = 0;
+ giveback(message, stm_msp);
+ return;
+ }
+ transfer = stm_msp->cur_transfer;
+
+ /* Delay if requested at end of transfer */
+ if (message->state == RUNNING_STATE) {
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer, transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ if (previous->cs_change)
+ stm_msp->cur_chip->cs_control(SPI_CHIP_SELECT);
+ } else {
+ /* START_STATE */
+ message->state = RUNNING_STATE;
+ }
+ stm_msp->tx = (void *)transfer->tx_buf;
+ stm_msp->tx_end = stm_msp->tx + stm_msp->cur_transfer->len;
+ stm_msp->rx = (void *)transfer->rx_buf;
+ stm_msp->rx_end = stm_msp->rx + stm_msp->cur_transfer->len;
+
+ stm_msp->write = stm_msp->tx ?
+ stm_msp->cur_chip->write : stm_msp->cur_chip->null_write;
+ stm_msp->read = stm_msp->rx ?
+ stm_msp->cur_chip->read : stm_msp->cur_chip->null_read;
+
+ stm_msp_controller_cmd(stm_msp, FLUSH_FIFO);
+ stm_msp_controller_cmd(stm_msp, ENABLE_ALL_INTERRUPT);
+}
+
+static int init_queue(struct stm_msp *stm_msp)
+{
+ INIT_LIST_HEAD(&stm_msp->queue);
+ spin_lock_init(&stm_msp->lock);
+
+ stm_msp->run = QUEUE_STOPPED;
+ stm_msp->busy = 0;
+
+ tasklet_init(&stm_msp->pump_transfers, pump_transfers,
+ (unsigned long)stm_msp);
+ INIT_WORK(&stm_msp->spi_work, pump_messages);
+
+#ifdef CONFIG_SPI_WORKQUEUE
+ stm_msp->workqueue = create_singlethread_workqueue(
+ dev_name(&stm_msp->master->dev));
+
+ if (stm_msp->workqueue == NULL)
+ return -EBUSY;
+#endif /* CONFIG_SPI_WORKQUEUE */
+
+ init_timer(&stm_msp->spi_notify_timer);
+
+ stm_msp->spi_notify_timer.expires = jiffies + msecs_to_jiffies(1000);
+ stm_msp->spi_notify_timer.function = spi_notify;
+ stm_msp->spi_notify_timer.data = (unsigned long)stm_msp;
+
+ return 0;
+}
+
+static int start_queue(struct stm_msp *stm_msp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&stm_msp->lock, flags);
+
+ if (stm_msp->run == QUEUE_RUNNING || stm_msp->busy) {
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return -EBUSY;
+ }
+
+ stm_msp->run = QUEUE_RUNNING;
+ stm_msp->cur_msg = NULL;
+ stm_msp->cur_transfer = NULL;
+ stm_msp->cur_chip = NULL;
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ return 0;
+}
+
+static int stop_queue(struct stm_msp *stm_msp)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int status = 0;
+
+ spin_lock_irqsave(&stm_msp->lock, flags);
+
+ /* This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the stm_msp->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+ * friends on every SPI message. Do this instead */
+
+ stm_msp->run = QUEUE_STOPPED;
+
+ while (!list_empty(&stm_msp->queue) && stm_msp->busy && limit--) {
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&stm_msp->lock, flags);
+ }
+
+ if (!list_empty(&stm_msp->queue) || stm_msp->busy)
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&stm_msp->lock, flags);
+
+ return status;
+}
+
+static int destroy_queue(struct stm_msp *stm_msp)
+{
+ int status;
+
+ status = stop_queue(stm_msp);
+
+ if (status != 0)
+ return status;
+#ifdef CONFIG_SPI_WORKQUEUE
+ destroy_workqueue(stm_msp->workqueue);
+#endif
+ del_timer_sync(&stm_msp->spi_notify_timer);
+
+ return 0;
+}
+
+/**
+ * stm_msp_null_writer - write dummy data to the data register
+ * @stm_msp: spi driver private data structure
+ *
+ * This function is used as the write function for transfers whose Tx
+ * buffer is NULL. It simply writes '0' to the data register.
+ */
+static void stm_msp_null_writer(struct stm_msp *stm_msp)
+{
+ u32 cur_write = 0;
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_TFU) ||
+ (stm_msp->tx >= stm_msp->tx_end))
+ return;
+
+ writel(0x0, MSP_DR(stm_msp->regs));
+ stm_msp->tx += (stm_msp->cur_chip->n_bytes);
+ cur_write++;
+
+ if (cur_write == 8)
+ return;
+ }
+}
+
+/**
+ * stm_msp_null_reader - read data from the data register and discard it
+ * @stm_msp: spi driver private data structure
+ *
+ * This function is used as the read function for transfers whose Rx
+ * buffer is NULL. The data read is discarded.
+ */
+static void stm_msp_null_reader(struct stm_msp *stm_msp)
+{
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_RFE) ||
+ (stm_msp->rx >= stm_msp->rx_end))
+ return;
+
+ readl(MSP_DR(stm_msp->regs));
+ stm_msp->rx += (stm_msp->cur_chip->n_bytes);
+ }
+}
+
+/**
+ * stm_msp_u8_writer - write FIFO data to the data register as 8-bit data
+ * @stm_msp: spi driver private data structure
+ *
+ * This function writes data to the Tx FIFO until the FIFO is full (as
+ * indicated by the status register) or the transfer is complete. It also
+ * updates the temporary write pointer tx in stm_msp, which maintains the
+ * current write position in the transfer buffer. No more than the FIFO
+ * depth is written per call.
+ */
+void stm_msp_u8_writer(struct stm_msp *stm_msp)
+{
+ u32 cur_write = 0;
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_TFU) ||
+ (stm_msp->tx >= stm_msp->tx_end))
+ return;
+
+ writel((u32)(*(u8 *)(stm_msp->tx)), MSP_DR(stm_msp->regs));
+ stm_msp->tx += (stm_msp->cur_chip->n_bytes);
+ cur_write++;
+
+ if (cur_write == MSP_FIFO_DEPTH)
+ return;
+ }
+}
+
+/**
+ * stm_msp_u8_reader - read FIFO data from the data register as 8-bit data
+ * @stm_msp: spi driver private data structure
+ *
+ * This function reads data from the Rx FIFO until the FIFO is empty (as
+ * indicated by the status register) or the transfer is complete. It also
+ * updates the temporary read pointer rx in stm_msp, which maintains the
+ * current read position in the transfer buffer.
+ */
+void stm_msp_u8_reader(struct stm_msp *stm_msp)
+{
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_RFE) ||
+ (stm_msp->rx >= stm_msp->rx_end))
+ return;
+
+ *(u8 *)(stm_msp->rx) = (u8)readl(MSP_DR(stm_msp->regs));
+ stm_msp->rx += (stm_msp->cur_chip->n_bytes);
+ }
+}
+
+/**
+ * stm_msp_u16_writer - write FIFO data to the data register as 16-bit data
+ * @stm_msp: spi driver private data structure
+ *
+ * This function writes data to the Tx FIFO until the FIFO is full (as
+ * indicated by the status register) or the transfer is complete. It also
+ * updates the temporary write pointer tx in stm_msp, which maintains the
+ * current write position in the transfer buffer. No more than the FIFO
+ * depth is written per call.
+ */
+void stm_msp_u16_writer(struct stm_msp *stm_msp)
+{
+ u32 cur_write = 0;
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_TFU) ||
+ (stm_msp->tx >= stm_msp->tx_end))
+ return;
+
+ writel((u32)(*(u16 *)(stm_msp->tx)), MSP_DR(stm_msp->regs));
+ stm_msp->tx += (stm_msp->cur_chip->n_bytes);
+ cur_write++;
+
+ if (cur_write == MSP_FIFO_DEPTH)
+ return;
+ }
+}
+
+/**
+ * stm_msp_u16_reader - read FIFO data from the data register as 16-bit data
+ * @stm_msp: spi driver private data structure
+ *
+ * This function reads data from the Rx FIFO until the FIFO is empty (as
+ * indicated by the status register) or the transfer is complete. It also
+ * updates the temporary read pointer rx in stm_msp, which maintains the
+ * current read position in the transfer buffer.
+ */
+void stm_msp_u16_reader(struct stm_msp *stm_msp)
+{
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_RFE) ||
+ (stm_msp->rx >= stm_msp->rx_end))
+ return;
+
+ *(u16 *)(stm_msp->rx) = (u16)readl(MSP_DR(stm_msp->regs));
+ stm_msp->rx += (stm_msp->cur_chip->n_bytes);
+ }
+}
+
+/**
+ * stm_msp_u32_writer - write FIFO data to the data register as 32-bit data
+ * @stm_msp: spi driver private data structure
+ *
+ * This function writes data to the Tx FIFO until the FIFO is full (as
+ * indicated by the status register) or the transfer is complete. It also
+ * updates the temporary write pointer tx in stm_msp, which maintains the
+ * current write position in the transfer buffer. No more than the FIFO
+ * depth is written per call.
+ */
+void stm_msp_u32_writer(struct stm_msp *stm_msp)
+{
+ u32 cur_write = 0;
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_TFU) ||
+ (stm_msp->tx >= stm_msp->tx_end))
+ return;
+
+ /* Write Data to Data Register */
+ writel(*(u32 *)(stm_msp->tx), MSP_DR(stm_msp->regs));
+ stm_msp->tx += (stm_msp->cur_chip->n_bytes);
+ cur_write++;
+
+ if (cur_write == MSP_FIFO_DEPTH)
+ return;
+ }
+}
+
+/**
+ * stm_msp_u32_reader - read Rx FIFO data as 32-bit words
+ * @stm_msp: spi driver private data structure
+ *
+ * This function reads data from the Rx FIFO while it is not empty
+ * (as indicated by the status register) or until the transfer is complete.
+ * It also advances the temporary read pointer rx in stm_msp, which tracks
+ * the current read position in the transfer buffer.
+ */
+void stm_msp_u32_reader(struct stm_msp *stm_msp)
+{
+ u32 status;
+
+ while (1) {
+ status = readl(MSP_FLR(stm_msp->regs));
+
+ if ((status & MSP_FLR_MASK_RFE) ||
+ (stm_msp->rx >= stm_msp->rx_end))
+ return;
+
+ *(u32 *)(stm_msp->rx) = readl(MSP_DR(stm_msp->regs));
+ stm_msp->rx += (stm_msp->cur_chip->n_bytes);
+ }
+}
+
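+/*
+ * Illustrative sketch (not part of this driver): a polled transfer
+ * pairs one writer/reader couple from above, chosen by word size in
+ * stm_msp_setup(), and loops until both FIFO pointers reach the end
+ * of the transfer buffer. Field and callback names follow the
+ * driver's own; surrounding setup and error handling are omitted.
+ *
+ *	while ((stm_msp->tx < stm_msp->tx_end) ||
+ *	       (stm_msp->rx < stm_msp->rx_end)) {
+ *		stm_msp->write(stm_msp);   (e.g. stm_msp_u8_writer)
+ *		stm_msp->read(stm_msp);    (e.g. stm_msp_u8_reader)
+ *	}
+ */
+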
+/**
+ * stm_msp_interrupt_handler - interrupt handler function
+ */
+static irqreturn_t stm_msp_interrupt_handler(int irq, void *dev_id)
+{
+ struct stm_msp *stm_msp = (struct stm_msp *)dev_id;
+ struct spi_message *msg = stm_msp->cur_msg;
+ u32 irq_status = 0;
+
+ if (!msg) {
+ dev_err(&stm_msp->adev->dev,
+ "Bad message state in interrupt handler");
+ /* Never fail */
+ return IRQ_HANDLED;
+ }
+
+ /* Read the Interrupt Status Register */
+ irq_status = readl(MSP_MIS(stm_msp->regs));
+
+ if (irq_status) {
+ if (irq_status & MSP_MIS_MASK_ROEMIS) { /* Overrun interrupt */
+ /* Bail out: our data has been corrupted */
+ dev_dbg(&stm_msp->adev->dev,
+ "Received ROR interrupt\n");
+
+ stm_msp_controller_cmd(stm_msp, DISABLE_ALL_INTERRUPT);
+ stm_msp_controller_cmd(stm_msp, CLEAR_ALL_INTERRUPT);
+ stm_msp_controller_cmd(stm_msp, DISABLE_CONTROLLER);
+ msg->state = ERROR_STATE;
+ tasklet_schedule(&stm_msp->pump_transfers);
+ return IRQ_HANDLED;
+ }
+
+ stm_msp->read(stm_msp);
+ stm_msp->write(stm_msp);
+
+ if (stm_msp->tx == stm_msp->tx_end) {
+ /* Tx side is done; disable Transmit interrupts */
+ writel(readl(MSP_IMSC(stm_msp->regs)) &
+ (~MSP_IMSC_MASK_TXIM) & (~MSP_IMSC_MASK_TFOIM),
+ MSP_IMSC(stm_msp->regs));
+ }
+
+ /* Clearing any Xmit underrun error. Overrun already handled */
+ stm_msp_controller_cmd(stm_msp, CLEAR_ALL_INTERRUPT);
+
+ if (stm_msp->rx == stm_msp->rx_end) {
+ stm_msp_controller_cmd(stm_msp, DISABLE_ALL_INTERRUPT);
+ stm_msp_controller_cmd(stm_msp, CLEAR_ALL_INTERRUPT);
+
+ dev_dbg(&stm_msp->adev->dev,
+ "Interrupt transfer completed.\n");
+
+ /* Update total bytes transferred */
+ msg->actual_length += stm_msp->cur_transfer->len;
+
+ if (stm_msp->cur_transfer->cs_change)
+ stm_msp->cur_chip->cs_control(
+ SPI_CHIP_DESELECT);
+
+ /* Move to next transfer */
+ msg->state = next_transfer(stm_msp);
+ tasklet_schedule(&stm_msp->pump_transfers);
+ return IRQ_HANDLED;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
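+/*
+ * Flow summary for the handler above (derived from the code): a
+ * receive overrun aborts the message and defers to the pump-transfers
+ * tasklet; otherwise the Rx FIFO is drained and the Tx FIFO refilled,
+ * Tx interrupts are masked once the write pointer reaches tx_end, and
+ * the transfer is completed (and the next one scheduled) once the
+ * read pointer reaches rx_end.
+ */
+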
+/**
+ * stm_msp_cleanup - cleanup function registered to SPI master framework
+ * @spi: spi device which is requesting cleanup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. It frees the runtime state of the chip.
+ */
+static void stm_msp_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ if (chip) {
+ kfree(chip->ctr_regs);
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+ }
+}
+
+/**
+ * null_cs_control - Dummy chip select function
+ * @command: select/deselect the chip
+ *
+ * If the client does not provide a chip select function, this dummy
+ * function is used instead.
+ */
+static void null_cs_control(u32 command)
+{
+ /* Nothing to do */
+ (void)command;
+}
+
+static int verify_msp_controller_parameters(struct stm_msp_config_chip
+ *chip_info)
+{
+ /* FIXME: check clock params */
+ if ((chip_info->lbm != SPI_LOOPBACK_ENABLED) &&
+ (chip_info->lbm != SPI_LOOPBACK_DISABLED)) {
+ dev_dbg(chip_info->dev,
+ "Loopback mode is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->hierarchy != SPI_MASTER) &&
+ (chip_info->hierarchy != SPI_SLAVE)) {
+ dev_dbg(chip_info->dev,
+ "Hierarchy is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->endian_rx != SPI_FIFO_MSB) &&
+ (chip_info->endian_rx != SPI_FIFO_LSB)) {
+ dev_dbg(chip_info->dev,
+ "Rx FIFO endianness is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->endian_tx != SPI_FIFO_MSB) &&
+ (chip_info->endian_tx != SPI_FIFO_LSB)) {
+ dev_dbg(chip_info->dev,
+ "Tx FIFO endianness is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->data_size < MSP_DATA_BITS_8) ||
+ (chip_info->data_size > MSP_DATA_BITS_32)) {
+ dev_dbg(chip_info->dev,
+ "MSP data size is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->com_mode != SPI_INTERRUPT_TRANSFER) &&
+ (chip_info->com_mode != SPI_POLLING_TRANSFER)) {
+ dev_dbg(chip_info->dev,
+ "Communication mode is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if (((chip_info->proto_params).clk_phase !=
+ SPI_CLK_ZERO_CYCLE_DELAY) &&
+ ((chip_info->proto_params).clk_phase !=
+ SPI_CLK_HALF_CYCLE_DELAY)) {
+ dev_dbg(chip_info->dev,
+ "Clock phase is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if (((chip_info->proto_params).clk_pol !=
+ SPI_CLK_POL_IDLE_LOW) &&
+ ((chip_info->proto_params).clk_pol !=
+ SPI_CLK_POL_IDLE_HIGH)) {
+ dev_dbg(chip_info->dev,
+ "Clock polarity is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if (chip_info->cs_control == NULL) {
+ dev_dbg(chip_info->dev,
+ "Chip Select Function is NULL for this chip\n");
+ chip_info->cs_control = null_cs_control;
+ }
+ return 0;
+}
+
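+/*
+ * Example (hypothetical board code): a configuration that passes the
+ * checks above. The values are this driver's own enums; the
+ * cs_control callback is board specific and only named here.
+ *
+ *	static struct stm_msp_config_chip board_msp_cfg = {
+ *		.lbm		= SPI_LOOPBACK_DISABLED,
+ *		.com_mode	= SPI_INTERRUPT_TRANSFER,
+ *		.hierarchy	= SPI_MASTER,
+ *		.endian_tx	= SPI_FIFO_MSB,
+ *		.endian_rx	= SPI_FIFO_MSB,
+ *		.data_size	= MSP_DATA_BITS_8,
+ *		.proto_params.clk_phase	= SPI_CLK_HALF_CYCLE_DELAY,
+ *		.proto_params.clk_pol	= SPI_CLK_POL_IDLE_LOW,
+ *		.cs_control	= board_cs_control,
+ *	};
+ */
+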
+static struct stm_msp_config_chip *allocate_default_msp_chip_cfg(
+ struct spi_device *spi)
+{
+ struct stm_msp_config_chip *chip_info;
+
+ chip_info = kzalloc(sizeof(struct stm_msp_config_chip), GFP_KERNEL);
+
+ if (!chip_info) {
+ dev_err(&spi->dev, "setup - cannot allocate controller data");
+ return NULL;
+ }
+ dev_dbg(&spi->dev, "Allocated Memory for controller data\n");
+
+ chip_info->lbm = SPI_LOOPBACK_DISABLED;
+ chip_info->com_mode = SPI_POLLING_TRANSFER;
+ chip_info->hierarchy = SPI_MASTER;
+ chip_info->endian_tx = SPI_FIFO_LSB;
+ chip_info->endian_rx = SPI_FIFO_LSB;
+ chip_info->data_size = MSP_DATA_BITS_32;
+
+ if (spi->max_speed_hz != 0)
+ chip_info->freq = spi->max_speed_hz;
+ else
+ chip_info->freq = SPI_DEFAULT_MAX_SPEED_HZ;
+
+ chip_info->proto_params.clk_phase = SPI_CLK_HALF_CYCLE_DELAY;
+ chip_info->proto_params.clk_pol = SPI_CLK_POL_IDLE_LOW;
+ chip_info->cs_control = null_cs_control;
+
+ return chip_info;
+}
+
+static void stm_msp_delay(struct stm_msp *stm_msp)
+{
+ udelay(15);
+
+ while (readl(MSP_FLR(stm_msp->regs)) &
+ (MSP_FLR_MASK_RBUSY | MSP_FLR_MASK_TBUSY))
+ udelay(1);
+}
+
+/**
+ * stm_msp_setup - setup function registered to SPI master framework
+ * @spi: spi device which is requesting setup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. If it is the first time when setup is called by this device,
+ * this function will initialize the runtime state for this chip and save
+ * the same in the device structure. Else it will update the runtime info
+ * with the updated chip info.
+ */
+static int stm_msp_setup(struct spi_device *spi)
+{
+ struct stm_msp_config_chip *chip_info;
+ struct chip_data *curr_cfg;
+ struct spi_master *master;
+ int status = 0;
+ u16 sckdiv = 0;
+ s16 bus_num = 0;
+ struct stm_msp *stm_msp = spi_master_get_devdata(spi->master);
+ struct msp_regs *msp_regs;
+ master = stm_msp->master;
+ bus_num = master->bus_num - 1;
+
+ /* Get controller data */
+ chip_info = spi->controller_data;
+ /* Get controller_state */
+ curr_cfg = spi_get_ctldata(spi);
+
+ if (curr_cfg == NULL) {
+ curr_cfg = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+
+ if (!curr_cfg) {
+ dev_err(&stm_msp->adev->dev,
+ "setup - cannot allocate controller state");
+ return -ENOMEM;
+ }
+
+ curr_cfg->chip_id = spi->chip_select;
+ curr_cfg->ctr_regs = kzalloc(sizeof(struct msp_regs),
+ GFP_KERNEL);
+
+ if (curr_cfg->ctr_regs == NULL) {
+ dev_err(&stm_msp->adev->dev,
+ "setup - cannot allocate mem for regs");
+ status = -ENOMEM;
+ goto err_first_setup;
+ }
+
+ dev_dbg(&stm_msp->adev->dev,
+ "chip Id = %d\n", curr_cfg->chip_id);
+
+ if (chip_info == NULL) {
+ chip_info = allocate_default_msp_chip_cfg(spi);
+
+ if (!chip_info) {
+ dev_err(&stm_msp->adev->dev,
+ "setup - cannot allocate cntlr data");
+ status = -ENOMEM;
+ goto err_first_setup;
+ }
+
+ spi->controller_data = chip_info;
+ }
+ }
+
+ /* Pointer back to the SPI device */
+ chip_info->dev = &spi->dev;
+
+ if (chip_info->freq == 0) {
+ /* Calculate Specific Freq. */
+ if ((MSP_INTERNAL_CLK == chip_info->clk_freq.clk_src) ||
+ (MSP_EXTERNAL_CLK == chip_info->clk_freq.clk_src)) {
+ sckdiv = chip_info->clk_freq.sckdiv;
+ } else {
+ status = -EINVAL;
+ dev_err(&stm_msp->adev->dev,
+ "setup - controller clock data is incorrect");
+ goto err_config_params;
+ }
+ } else {
+ /* Calculate Effective Freq. */
+ sckdiv = (DEFAULT_MSP_CLK / (chip_info->freq)) - 1;
+
+ if (sckdiv > MAX_SCKDIV) {
+ dev_dbg(&stm_msp->adev->dev,
+ "SPI: cannot set frequency below 48 kHz, "
+ "using the lowest (48 kHz)\n");
+ sckdiv = MAX_SCKDIV;
+ }
+ }
+
+ status = verify_msp_controller_parameters(chip_info);
+
+ if (status) {
+ dev_err(&stm_msp->adev->dev,
+ "setup - controller data is incorrect");
+ goto err_config_params;
+ }
+
+ /* Now set controller state based on controller data */
+ curr_cfg->xfer_type = chip_info->com_mode;
+ curr_cfg->cs_control = chip_info->cs_control;
+ curr_cfg->delay = stm_msp_delay;
+
+ curr_cfg->null_write = stm_msp_null_writer;
+ curr_cfg->null_read = stm_msp_null_reader;
+
+ if (chip_info->data_size <= MSP_DATA_BITS_8) {
+ dev_dbg(&stm_msp->adev->dev, "Less than 8 bits per word...\n");
+
+ curr_cfg->n_bytes = 1;
+ curr_cfg->read = stm_msp_u8_reader;
+ curr_cfg->write = stm_msp_u8_writer;
+ } else if (chip_info->data_size <= MSP_DATA_BITS_16) {
+ dev_dbg(&stm_msp->adev->dev, "Less than 16 bits per word...\n");
+
+ curr_cfg->n_bytes = 2;
+ curr_cfg->read = stm_msp_u16_reader;
+ curr_cfg->write = stm_msp_u16_writer;
+ } else {
+ dev_dbg(&stm_msp->adev->dev, "Less than 32 bits per word...\n");
+
+ curr_cfg->n_bytes = 4;
+ curr_cfg->read = stm_msp_u32_reader;
+ curr_cfg->write = stm_msp_u32_writer;
+ }
+
+ /* Now initialize all register settings reqd. for this chip */
+
+ msp_regs = (struct msp_regs *)(curr_cfg->ctr_regs);
+ msp_regs->gcr = 0x0;
+ msp_regs->tcf = 0x0;
+ msp_regs->rcf = 0x0;
+ msp_regs->srg = 0x0;
+ msp_regs->dmacr = 0x0;
+
+ MSP_WBITS(msp_regs->dmacr, 0x0, MSP_DMACR_MASK_RDMAE, 0);
+ MSP_WBITS(msp_regs->dmacr, 0x0, MSP_DMACR_MASK_TDMAE, 1);
+
+ /* GCR Reg Config */
+
+ MSP_WBITS(msp_regs->gcr,
+ MSP_RECEIVER_DISABLED, MSP_GCR_MASK_RXEN, 0);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_RX_FIFO_ENABLED, MSP_GCR_MASK_RFFEN, 1);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TRANSMITTER_DISABLED, MSP_GCR_MASK_TXEN, 8);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TX_FIFO_ENABLED, MSP_GCR_MASK_TFFEN, 9);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TX_FRAME_SYNC_POL_LOW, MSP_GCR_MASK_TFSPOL, 10);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TX_FRAME_SYNC_INT, MSP_GCR_MASK_TFSSEL, 11);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TRANSMIT_DATA_WITH_DELAY, MSP_GCR_MASK_TXDDL, 15);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_SAMPLE_RATE_GEN_ENABLE, MSP_GCR_MASK_SGEN, 16);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_CLOCK_INTERNAL, MSP_GCR_MASK_SCKSEL, 18);
+ MSP_WBITS(msp_regs->gcr,
+ MSP_FRAME_GEN_ENABLE, MSP_GCR_MASK_FGEN, 20);
+ MSP_WBITS(msp_regs->gcr,
+ SPI_BURST_MODE_DISABLE, MSP_GCR_MASK_SPIBME, 23);
+
+ if (chip_info->lbm == SPI_LOOPBACK_ENABLED)
+ MSP_WBITS(msp_regs->gcr,
+ MSP_LOOPBACK_ENABLED, MSP_GCR_MASK_LBM, 7);
+ else
+ MSP_WBITS(msp_regs->gcr,
+ MSP_LOOPBACK_DISABLED, MSP_GCR_MASK_LBM, 7);
+
+ if (chip_info->hierarchy == SPI_MASTER)
+ MSP_WBITS(msp_regs->gcr,
+ MSP_IS_SPI_MASTER, MSP_GCR_MASK_TCKSEL, 14);
+ else
+ MSP_WBITS(msp_regs->gcr,
+ MSP_IS_SPI_SLAVE, MSP_GCR_MASK_TCKSEL, 14);
+
+ if (chip_info->proto_params.clk_phase == SPI_CLK_ZERO_CYCLE_DELAY)
+ MSP_WBITS(msp_regs->gcr,
+ MSP_SPI_PHASE_ZERO_CYCLE_DELAY,
+ MSP_GCR_MASK_SPICKM, 21);
+ else
+ MSP_WBITS(msp_regs->gcr,
+ MSP_SPI_PHASE_HALF_CYCLE_DELAY,
+ MSP_GCR_MASK_SPICKM, 21);
+
+ if (chip_info->proto_params.clk_pol == SPI_CLK_POL_IDLE_HIGH)
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TX_CLOCK_POL_HIGH, MSP_GCR_MASK_TCKPOL, 13);
+ else
+ MSP_WBITS(msp_regs->gcr,
+ MSP_TX_CLOCK_POL_LOW, MSP_GCR_MASK_TCKPOL, 13);
+
+ /* RCF Reg Config */
+ MSP_WBITS(msp_regs->rcf,
+ MSP_IGNORE_RX_FRAME_SYNC_PULSE, MSP_RCF_MASK_RFSIG, 15);
+ MSP_WBITS(msp_regs->rcf,
+ MSP_RX_1BIT_DATA_DELAY, MSP_RCF_MASK_RDDLY, 13);
+
+ if (chip_info->endian_rx == SPI_FIFO_LSB)
+ MSP_WBITS(msp_regs->rcf,
+ MSP_RX_ENDIANESS_LSB, MSP_RCF_MASK_RENDN, 12);
+ else
+ MSP_WBITS(msp_regs->rcf,
+ MSP_RX_ENDIANESS_MSB, MSP_RCF_MASK_RENDN, 12);
+
+ MSP_WBITS(msp_regs->rcf, chip_info->data_size, MSP_RCF_MASK_RP1ELEN, 0);
+
+ /* TCF Reg Config */
+
+ MSP_WBITS(msp_regs->tcf,
+ MSP_IGNORE_TX_FRAME_SYNC_PULSE, MSP_TCF_MASK_TFSIG, 15);
+ MSP_WBITS(msp_regs->tcf,
+ MSP_TX_1BIT_DATA_DELAY, MSP_TCF_MASK_TDDLY, 13);
+
+ if (chip_info->endian_tx == SPI_FIFO_LSB)
+ MSP_WBITS(msp_regs->tcf,
+ MSP_TX_ENDIANESS_LSB, MSP_TCF_MASK_TENDN, 12);
+ else
+ MSP_WBITS(msp_regs->tcf,
+ MSP_TX_ENDIANESS_MSB, MSP_TCF_MASK_TENDN, 12);
+ MSP_WBITS(msp_regs->tcf, chip_info->data_size, MSP_TCF_MASK_TP1ELEN, 0);
+
+ /* SRG Reg Config */
+
+ MSP_WBITS(msp_regs->srg, sckdiv, MSP_SRG_MASK_SCKDIV, 0);
+
+ /* Save controller_state */
+ spi_set_ctldata(spi, curr_cfg);
+
+ return status;
+
+err_config_params:
+err_first_setup:
+ spi_set_ctldata(spi, NULL);
+ kfree(curr_cfg->ctr_regs);
+ kfree(curr_cfg);
+ return status;
+}
+
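+/*
+ * Usage sketch (bus number, modalias and speed are hypothetical): a
+ * client device reaches stm_msp_setup() through the regular SPI board
+ * info path, with a configuration like the one sketched above passed
+ * via controller_data.
+ *
+ *	static struct spi_board_info board_spi_devs[] __initdata = {
+ *		{
+ *			.modalias	 = "some-spi-dev",
+ *			.max_speed_hz	 = 4000000,
+ *			.bus_num	 = 0,
+ *			.chip_select	 = 0,
+ *			.controller_data = &board_msp_cfg,
+ *		},
+ *	};
+ *	spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs));
+ */
+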
+static int __init stm_msp_probe(struct amba_device *adev,
+ const struct amba_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct stm_msp_controller *platform_info = adev->dev.platform_data;
+ struct spi_master *master;
+ struct stm_msp *stm_msp = NULL; /* Data for this driver */
+ int irq, status = 0;
+
+ dev_info(dev, "STM MSP driver, device ID: 0x%08x\n", adev->periphid);
+
+ if (platform_info == NULL) {
+ dev_err(dev, "probe - no platform data supplied\n");
+ status = -ENODEV;
+ goto err_no_pdata;
+ }
+
+ /* Allocate master with space for data */
+ master = spi_alloc_master(dev, sizeof(struct stm_msp));
+
+ if (master == NULL) {
+ dev_err(dev, "probe - cannot alloc spi_master\n");
+ status = -ENOMEM;
+ goto err_no_mem;
+ }
+
+ stm_msp = spi_master_get_devdata(master);
+ stm_msp->master = master;
+ stm_msp->master_info = platform_info;
+ stm_msp->adev = adev;
+
+ stm_msp->clk = clk_get(&adev->dev, NULL);
+
+ if (IS_ERR(stm_msp->clk)) {
+ dev_err(dev, "probe - cannot find clock\n");
+ status = PTR_ERR(stm_msp->clk);
+ goto free_master;
+ }
+
+ /* Request the device's memory region */
+ status = amba_request_regions(adev, NULL);
+
+ if (status) {
+ status = -ENODEV;
+ goto disable_clk;
+ }
+
+ /* Map the device register area */
+ stm_msp->regs = ioremap(adev->res.start, resource_size(&adev->res));
+
+ if (stm_msp->regs == NULL) {
+ status = -ENODEV;
+ goto disable_clk;
+ }
+
+ irq = adev->irq[0];
+
+ if (irq <= 0) {
+ status = -ENODEV;
+ goto err_no_iores;
+ }
+
+ stm_msp_controller_cmd(stm_msp, LOAD_DEFAULT_CONFIG);
+
+ /* Required Info for an SPI controller */
+ /* Bus Number Which Assigned to this SPI controller on this board */
+ master->bus_num = (u16) platform_info->id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->setup = stm_msp_setup;
+ master->cleanup = stm_msp_cleanup;
+ master->transfer = stm_msp_transfer;
+
+ dev_dbg(dev, "BUSNO: %d\n", master->bus_num);
+
+ /* Initialize and start queue */
+ status = init_queue(stm_msp);
+
+ if (status != 0) {
+ dev_err(dev, "probe - problem initializing queue\n");
+ goto err_init_queue;
+ }
+
+ status = start_queue(stm_msp);
+
+ if (status != 0) {
+ dev_err(dev, "probe - problem starting queue\n");
+ goto err_start_queue;
+ }
+
+ amba_set_drvdata(adev, stm_msp);
+
+ dev_dbg(dev, "probe succeded\n");
+ dev_dbg(dev, "Bus No = %d, IRQ Line = %d, Virtual Addr: %x\n",
+ master->bus_num, irq, (u32)(stm_msp->regs));
+
+ status = request_irq(stm_msp->adev->irq[0],
+ stm_msp_interrupt_handler,
+ 0, stm_msp->master_info->device_name,
+ stm_msp);
+
+ if (status < 0) {
+ dev_err(dev, "probe - cannot get IRQ (%d)\n", status);
+ goto err_irq;
+ }
+
+ /* Register with the SPI framework */
+ status = spi_register_master(master);
+
+ if (status != 0) {
+ dev_err(dev, "probe - problem registering spi master\n");
+ goto err_spi_register;
+ }
+
+ return 0;
+
+err_spi_register:
+ free_irq(stm_msp->adev->irq[0], stm_msp);
+err_irq:
+err_init_queue:
+err_start_queue:
+ destroy_queue(stm_msp);
+err_no_iores:
+ iounmap(stm_msp->regs);
+disable_clk:
+ clk_put(stm_msp->clk);
+free_master:
+ spi_master_put(master);
+err_no_mem:
+err_no_pdata:
+ return status;
+}
+
+static int __exit stm_msp_remove(struct amba_device *adev)
+{
+ struct stm_msp *stm_msp = amba_get_drvdata(adev);
+ int status = 0;
+
+ if (!stm_msp)
+ return 0;
+
+ /* Remove the queue */
+ status = destroy_queue(stm_msp);
+
+ if (status != 0) {
+ dev_err(&adev->dev, "queue remove failed (%d)\n", status);
+ return status;
+ }
+
+ stm_msp_controller_cmd(stm_msp, LOAD_DEFAULT_CONFIG);
+
+ /* Release map resources */
+ iounmap(stm_msp->regs);
+ amba_release_regions(adev);
+ tasklet_disable(&stm_msp->pump_transfers);
+ free_irq(stm_msp->adev->irq[0], stm_msp);
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(stm_msp->master);
+
+ clk_put(stm_msp->clk);
+
+ /* Prevent double remove */
+ amba_set_drvdata(adev, NULL);
+ dev_dbg(&adev->dev, "remove succeded\n");
+ return status;
+}
+
+#ifdef CONFIG_PM
+
+/**
+ * stm_msp_suspend - MSP suspend function registered with PM framework.
+ * @adev: reference to the AMBA device structure
+ * @state: power management state
+ *
+ * This function is invoked when the system is going to sleep; it is
+ * called by the power management framework of the Linux kernel.
+ */
+static int stm_msp_suspend(struct amba_device *adev, pm_message_t state)
+{
+ struct stm_msp *stm_msp = amba_get_drvdata(adev);
+ int status = 0;
+
+ status = stop_queue(stm_msp);
+
+ if (status != 0) {
+ dev_warn(&adev->dev, "suspend cannot stop queue\n");
+ return status;
+ }
+
+ dev_dbg(&adev->dev, "suspended\n");
+ return 0;
+}
+
+/**
+ * stm_msp_resume - MSP Resume function registered with PM framework.
+ * @adev: reference to the AMBA device structure
+ *
+ * This function is invoked when the system is coming out of sleep; it
+ * is called by the power management framework of the Linux kernel.
+ */
+static int stm_msp_resume(struct amba_device *adev)
+{
+ struct stm_msp *stm_msp = amba_get_drvdata(adev);
+ int status = 0;
+
+ /* Start the queue running */
+ status = start_queue(stm_msp);
+
+ if (status)
+ dev_err(&adev->dev, "problem starting queue (%d)\n", status);
+ else
+ dev_dbg(&adev->dev, "resumed\n");
+
+ return status;
+}
+
+#else
+#define stm_msp_suspend NULL
+#define stm_msp_resume NULL
+#endif /* CONFIG_PM */
+
+static struct amba_id stm_msp_ids[] = {
+ {
+ .id = 0x00280021,
+ .mask = 0x00ffffff,
+ },
+ {
+ 0,
+ 0,
+ },
+};
+
+static struct amba_driver __refdata stm_msp_driver = {
+ .drv = {
+ .name = "MSP",
+ },
+ .id_table = stm_msp_ids,
+ .probe = stm_msp_probe,
+ .remove = __exit_p(stm_msp_remove),
+ .resume = stm_msp_resume,
+ .suspend = stm_msp_suspend,
+};
+
+static int __init stm_msp_init(void)
+{
+ return amba_driver_register(&stm_msp_driver);
+}
+
+static void __exit stm_msp_exit(void)
+{
+ amba_driver_unregister(&stm_msp_driver);
+}
+
+module_init(stm_msp_init);
+module_exit(stm_msp_exit);
+
+MODULE_AUTHOR("Sachin Verma <sachin.verma@st.com>");
+MODULE_DESCRIPTION("STM MSP (SPI protocol) Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index eb1dee26bda..a4495997d1b 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -8,6 +8,16 @@ config ANDROID
if ANDROID
+config ANDROID_AB5500_TIMED_VIBRA
+ bool "AB5500 Timed Output Vibrator"
+ depends on AB5500_CORE
+ depends on ANDROID_TIMED_OUTPUT
+ default y
+ help
+ Say Y here to enable the linear/rotary vibrator driver, using the
+ timed output class device, for ST-Ericsson's AB5500 mixed-signal
+ PMIC.
+
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
default n
@@ -53,6 +63,14 @@ config ANDROID_LOW_MEMORY_KILLER
---help---
Register processes to be killed when memory is low
+config ANDROID_STE_TIMED_VIBRA
+ bool "ST-Ericsson Timed Output Vibrator"
+ depends on SND_SOC_AB8500
+ depends on ANDROID_TIMED_OUTPUT
+ default y
+ help
+ ST-Ericsson's vibrator driver using timed output class device
+
source "drivers/staging/android/switch/Kconfig"
config ANDROID_INTF_ALARM
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 9b6c9ed91f6..0d642936a0b 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_ANDROID_AB5500_TIMED_VIBRA) += ab5500-timed-vibra.o
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
@@ -6,6 +7,7 @@ obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
+obj-$(CONFIG_ANDROID_STE_TIMED_VIBRA) += ste_timed_vibra.o
obj-$(CONFIG_ANDROID_SWITCH) += switch/
obj-$(CONFIG_ANDROID_INTF_ALARM) += alarm.o
obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o
diff --git a/drivers/staging/android/ab5500-timed-vibra.c b/drivers/staging/android/ab5500-timed-vibra.c
new file mode 100644
index 00000000000..35c627a6285
--- /dev/null
+++ b/drivers/staging/android/ab5500-timed-vibra.c
@@ -0,0 +1,490 @@
+/*
+ * ab5500-timed-vibra.c - driver for the vibrator in the ST-Ericsson AB5500 chip
+ *
+ * Copyright (C) 2011 ST-Ericsson SA.
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include "timed_output.h"
+
+#include <linux/mfd/abx500.h> /* abx500_* */
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500/ab5500-gpadc.h>
+#include <linux/ab5500-vibra.h>
+
+#define AB5500_VIBRA_DEV_NAME "ab5500:vibra"
+#define AB5500_VIBRA_DRV_NAME "ab5500-vibrator"
+
+/* Vibrator Register Address Offsets */
+#define AB5500_VIB_CTRL 0x10
+#define AB5500_VIB_VOLT 0x11
+#define AB5500_VIB_FUND_FREQ 0x12 /* Linear vibra resonance freq. */
+#define AB5500_VIB_FUND_DUTY 0x13
+#define AB5500_KELVIN_ANA 0xB1
+#define AB5500_VIBRA_KELVIN 0xFE
+
+/* Vibrator Control */
+#define AB5500_VIB_DISABLE (0x80)
+#define AB5500_VIB_PWR_ON (0x40)
+#define AB5500_VIB_FUND_EN (0x20)
+#define AB5500_VIB_FREQ_SHIFT (0)
+#define AB5500_VIB_DUTY_SHIFT (3)
+#define AB5500_VIB_VOLT_SHIFT (0)
+#define AB5500_VIB_PULSE_SHIFT (4)
+#define VIBRA_KELVIN_ENABLE (0x90)
+#define VIBRA_KELVIN_VOUT (0x20)
+
+/* Vibrator Freq. (in HZ) and Duty */
+enum ab5500_vibra_freq {
+ AB5500_VIB_FREQ_1HZ = 1,
+ AB5500_VIB_FREQ_2HZ,
+ AB5500_VIB_FREQ_4HZ,
+ AB5500_VIB_FREQ_8HZ,
+};
+
+enum ab5500_vibra_duty {
+ AB5500_VIB_DUTY_100 = 0,
+ AB5500_VIB_DUTY_75 = 8,
+ AB5500_VIB_DUTY_50 = 16,
+ AB5500_VIB_DUTY_25 = 24,
+};
+
+/* Linear vibrator resonance freq. duty */
+#define AB5500_VIB_RDUTY_50 (0x7F)
+
+/* Vibration magnitudes */
+#define AB5500_VIB_FREQ_MAX (4)
+#define AB5500_VIB_DUTY_MAX (4)
+
+static u8 vib_freq[AB5500_VIB_FREQ_MAX] = {
+ AB5500_VIB_FREQ_1HZ,
+ AB5500_VIB_FREQ_2HZ,
+ AB5500_VIB_FREQ_4HZ,
+ AB5500_VIB_FREQ_8HZ,
+};
+
+static u8 vib_duty[AB5500_VIB_DUTY_MAX] = {
+ AB5500_VIB_DUTY_100,
+ AB5500_VIB_DUTY_75,
+ AB5500_VIB_DUTY_50,
+ AB5500_VIB_DUTY_25,
+};
+
+/**
+ * struct ab5500_vibra - Vibrator driver internal info.
+ * @tdev: Pointer to timed output device structure
+ * @dev: Reference to vibra device structure
+ * @vibra_workqueue: Pointer to vibrator workqueue structure
+ * @vibra_work: Vibrator work
+ * @gpadc: Gpadc instance
+ * @vibra_wait: Vibrator wait queue head
+ * @vibra_lock: Vibrator lock
+ * @timeout_ms: Indicates how long the vibrator will be enabled (in ms)
+ * @timeout_start: Start of vibrator in jiffies
+ * @pdata: Local pointer to platform data with vibrator parameters
+ * @magnitude: required vibration strength
+ * @enable: Vibrator running status
+ * @eol: Vibrator end of life (EOL) status
+ **/
+struct ab5500_vibra {
+ struct timed_output_dev tdev;
+ struct device *dev;
+ struct workqueue_struct *vibra_workqueue;
+ struct work_struct vibra_work;
+ struct ab5500_gpadc *gpadc;
+ wait_queue_head_t vibra_wait;
+ spinlock_t vibra_lock;
+ unsigned int timeout_ms;
+ unsigned long timeout_start;
+ struct ab5500_vibra_platform_data *pdata;
+ u8 magnitude;
+ bool enable;
+ bool eol;
+};
+
+static inline u8 vibra_magnitude(u8 mag)
+{
+ u8 idx;
+
+ /* Scale 0..255 down to an index into the freq/duty tables */
+ idx = mag / (AB5500_VIB_FREQ_MAX * AB5500_VIB_DUTY_MAX);
+ mag = vib_freq[idx / AB5500_VIB_FREQ_MAX] << AB5500_VIB_FREQ_SHIFT;
+ mag |= vib_duty[idx % AB5500_VIB_DUTY_MAX] << AB5500_VIB_DUTY_SHIFT;
+
+ return mag;
+}
+
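+/*
+ * Worked example for the mapping above (assuming a full-scale input):
+ * mag = 255 gives idx = 255 / 16 = 15, so vib_freq[15 / 4] =
+ * AB5500_VIB_FREQ_8HZ and vib_duty[15 % 4] = AB5500_VIB_DUTY_25,
+ * i.e. the highest frequency at the lowest duty cycle.
+ */
+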
+static int ab5500_setup_vibra_kelvin(struct ab5500_vibra *vibra)
+{
+ int ret;
+
+ /* Establish the kelvin IP connection to be measured */
+ ret = abx500_set_register_interruptible(vibra->dev,
+ AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP,
+ AB5500_KELVIN_ANA, VIBRA_KELVIN_ENABLE);
+ if (ret < 0) {
+ dev_err(vibra->dev, "failed to set kelvin network\n");
+ return ret;
+ }
+
+ /* Select vibra parameter to be measured */
+ ret = abx500_set_register_interruptible(vibra->dev, AB5500_BANK_VIBRA,
+ AB5500_VIBRA_KELVIN, VIBRA_KELVIN_VOUT);
+ if (ret < 0)
+ dev_err(vibra->dev, "failed to select the kelvin param\n");
+
+ return ret;
+}
+
+static int ab5500_vibra_start(struct ab5500_vibra *vibra)
+{
+ u8 ctrl = 0;
+
+ ctrl = AB5500_VIB_PWR_ON |
+ vibra_magnitude(vibra->magnitude);
+
+ if (vibra->pdata->type == AB5500_VIB_LINEAR)
+ ctrl |= AB5500_VIB_FUND_EN;
+
+ return abx500_set_register_interruptible(vibra->dev,
+ AB5500_BANK_VIBRA, AB5500_VIB_CTRL, ctrl);
+}
+
+static int ab5500_vibra_stop(struct ab5500_vibra *vibra)
+{
+ return abx500_mask_and_set_register_interruptible(vibra->dev,
+ AB5500_BANK_VIBRA, AB5500_VIB_CTRL,
+ AB5500_VIB_PWR_ON, 0);
+}
+
+static int ab5500_vibra_eol_check(struct ab5500_vibra *vibra)
+{
+ int ret, vout;
+
+ ret = ab5500_setup_vibra_kelvin(vibra);
+ if (ret < 0) {
+ dev_err(vibra->dev, "failed to setup kelvin network\n");
+ return ret;
+ }
+
+ /* Start vibra to measure voltage */
+ ret = ab5500_vibra_start(vibra);
+ if (ret < 0) {
+ dev_err(vibra->dev, "failed to start vibra\n");
+ return ret;
+ }
+ /* 20 ms delay required for voltage ramp-up */
+ wait_event_interruptible_timeout(vibra->vibra_wait,
+ 0, msecs_to_jiffies(20));
+
+ vout = ab5500_gpadc_convert(vibra->gpadc, VIBRA_KELVIN);
+ if (vout < 0) {
+ dev_err(vibra->dev, "failed to read gpadc vibra\n");
+ return vout;
+ }
+
+ /* Stop vibra after measuring voltage */
+ ret = ab5500_vibra_stop(vibra);
+ if (ret < 0) {
+ dev_err(vibra->dev, "failed to stop vibra\n");
+ return ret;
+ }
+ /* Check for vibra eol condition */
+ if (vout < vibra->pdata->eol_voltage) {
+ vibra->eol = true;
+ dev_err(vibra->dev, "Vibra eol detected. Disabling vibra!\n");
+ }
+
+ return ret;
+}
+
+/**
+ * ab5500_vibra_work() - Vibrator work, turns on/off vibrator
+ * @work: Pointer to work structure
+ *
+ * This function is called from the workqueue; it turns the vibrator on/off.
+ **/
+static void ab5500_vibra_work(struct work_struct *work)
+{
+ struct ab5500_vibra *vibra = container_of(work,
+ struct ab5500_vibra, vibra_work);
+ unsigned long flags;
+ int ret;
+
+ ret = ab5500_vibra_start(vibra);
+ if (ret < 0)
+ dev_err(vibra->dev, "reg[%d] w failed: %d\n",
+ AB5500_VIB_CTRL, ret);
+
+ wait_event_interruptible_timeout(vibra->vibra_wait,
+ 0, msecs_to_jiffies(vibra->timeout_ms));
+
+ ret = ab5500_vibra_stop(vibra);
+ if (ret < 0)
+ dev_err(vibra->dev, "reg[%d] w failed: %d\n",
+ AB5500_VIB_CTRL, ret);
+
+ spin_lock_irqsave(&vibra->vibra_lock, flags);
+
+ vibra->timeout_start = 0;
+ vibra->enable = false;
+
+ spin_unlock_irqrestore(&vibra->vibra_lock, flags);
+}
+
+/**
+ * vibra_enable() - Enables vibrator
+ * @tdev: Pointer to timed output device structure
+ * @timeout: Time indicating how long vibrator will be enabled
+ *
+ * This function enables the vibrator.
+ **/
+static void vibra_enable(struct timed_output_dev *tdev, int timeout)
+{
+ struct ab5500_vibra *vibra = dev_get_drvdata(tdev->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vibra->vibra_lock, flags);
+
+ if ((!vibra->enable || timeout) && !vibra->eol) {
+ vibra->enable = true;
+
+ vibra->timeout_ms = timeout;
+ vibra->timeout_start = jiffies;
+ queue_work(vibra->vibra_workqueue, &vibra->vibra_work);
+ }
+
+ spin_unlock_irqrestore(&vibra->vibra_lock, flags);
+}
+
+/**
+ * vibra_get_time() - Returns remaining time to disabling vibration
+ * @tdev: Pointer to timed output device structure
+ *
+ * This function returns the time remaining until vibration is disabled.
+ *
+ * Returns:
+ * remaining time (in ms) until vibration is disabled
+ **/
+static int vibra_get_time(struct timed_output_dev *tdev)
+{
+ struct ab5500_vibra *vibra = dev_get_drvdata(tdev->dev);
+ unsigned int ms;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vibra->vibra_lock, flags);
+
+ if (vibra->enable)
+ ms = jiffies_to_msecs(vibra->timeout_start +
+ msecs_to_jiffies(vibra->timeout_ms) - jiffies);
+ else
+ ms = 0;
+
+ spin_unlock_irqrestore(&vibra->vibra_lock, flags);
+
+ return ms;
+}
+
+static int ab5500_vibra_reg_init(struct ab5500_vibra *vibra)
+{
+ int ret = 0;
+ u8 ctrl = 0;
+ u8 pulse = 0;
+
+ ctrl = (AB5500_VIB_DUTY_50 << AB5500_VIB_DUTY_SHIFT) |
+ (AB5500_VIB_FREQ_8HZ << AB5500_VIB_FREQ_SHIFT);
+
+ if (vibra->pdata->type == AB5500_VIB_LINEAR) {
+ ctrl |= AB5500_VIB_FUND_EN;
+
+ if (vibra->pdata->voltage > AB5500_VIB_VOLT_MAX)
+ vibra->pdata->voltage = AB5500_VIB_VOLT_MAX;
+
+ pulse = (vibra->pdata->pulse << AB5500_VIB_PULSE_SHIFT) |
+ (vibra->pdata->voltage << AB5500_VIB_VOLT_SHIFT);
+ ret = abx500_set_register_interruptible(vibra->dev,
+ AB5500_BANK_VIBRA, AB5500_VIB_VOLT,
+ pulse);
+ if (ret < 0) {
+ dev_err(vibra->dev,
+ "reg[%#x] w %#x failed: %d\n",
+ AB5500_VIB_VOLT, vibra->pdata->voltage, ret);
+ return ret;
+ }
+
+ ret = abx500_set_register_interruptible(vibra->dev,
+ AB5500_BANK_VIBRA, AB5500_VIB_FUND_FREQ,
+ vibra->pdata->res_freq);
+ if (ret < 0) {
+ dev_err(vibra->dev, "reg[%#x] w %#x failed: %d\n",
+ AB5500_VIB_FUND_FREQ,
+ vibra->pdata->res_freq, ret);
+ return ret;
+ }
+
+ ret = abx500_set_register_interruptible(vibra->dev,
+ AB5500_BANK_VIBRA, AB5500_VIB_FUND_DUTY,
+ AB5500_VIB_RDUTY_50);
+ if (ret < 0) {
+ dev_err(vibra->dev, "reg[%#x] w %#x failed: %d\n",
+ AB5500_VIB_FUND_DUTY,
+ AB5500_VIB_RDUTY_50, ret);
+ return ret;
+ }
+ }
+
+ ret = abx500_set_register_interruptible(vibra->dev,
+ AB5500_BANK_VIBRA, AB5500_VIB_CTRL, ctrl);
+ if (ret < 0) {
+ dev_err(vibra->dev, "reg[%#x] w %#x failed: %d\n",
+ AB5500_VIB_CTRL, ctrl, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ab5500_vibra_register_dev(struct ab5500_vibra *vibra,
+ struct platform_device *pdev)
+{
+ int ret = 0;
+
+ ret = timed_output_dev_register(&vibra->tdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register timed output device\n");
+ goto err_out;
+ }
+
+ dev_set_drvdata(vibra->tdev.dev, vibra);
+
+ /* Create workqueue just for timed output vibrator */
+ vibra->vibra_workqueue =
+ create_singlethread_workqueue("ste-timed-output-vibra");
+ if (!vibra->vibra_workqueue) {
+ dev_err(&pdev->dev, "failed to allocate workqueue\n");
+ ret = -ENOMEM;
+ goto exit_output_unregister;
+ }
+
+ init_waitqueue_head(&vibra->vibra_wait);
+ INIT_WORK(&vibra->vibra_work, ab5500_vibra_work);
+ spin_lock_init(&vibra->vibra_lock);
+
+ platform_set_drvdata(pdev, vibra);
+
+ return ret;
+
+exit_output_unregister:
+ timed_output_dev_unregister(&vibra->tdev);
+err_out:
+ return ret;
+}
+
+static int __devinit ab5500_vibra_probe(struct platform_device *pdev)
+{
+ struct ab5500_vibra_platform_data *pdata = pdev->dev.platform_data;
+ struct ab5500_vibra *vibra = NULL;
+ int ret = 0;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data required. Quitting...\n");
+ return -ENODEV;
+ }
+
+ vibra = kzalloc(sizeof(struct ab5500_vibra), GFP_KERNEL);
+ if (vibra == NULL)
+ return -ENOMEM;
+
+ vibra->tdev.name = "vibrator";
+ vibra->tdev.enable = vibra_enable;
+ vibra->tdev.get_time = vibra_get_time;
+ vibra->timeout_start = 0;
+ vibra->enable = false;
+ vibra->magnitude = pdata->magnitude;
+ vibra->pdata = pdata;
+ vibra->dev = &pdev->dev;
+
+ if (vibra->pdata->eol_voltage) {
+ vibra->gpadc = ab5500_gpadc_get("ab5500-adc.0");
+ if (IS_ERR(vibra->gpadc)) {
+ ret = PTR_ERR(vibra->gpadc);
+ goto err_alloc;
+ }
+ }
+
+ if (vibra->pdata->type == AB5500_VIB_LINEAR)
+ dev_info(&pdev->dev, "Linear Type Vibrators\n");
+ else
+ dev_info(&pdev->dev, "Rotary Type Vibrators\n");
+
+ ret = ab5500_vibra_reg_init(vibra);
+ if (ret < 0)
+ goto err_alloc;
+
+ ret = ab5500_vibra_register_dev(vibra, pdev);
+ if (ret < 0)
+ goto err_alloc;
+
+ /* Perform vibra eol diagnostics if eol_voltage is set */
+ if (vibra->pdata->eol_voltage) {
+ ret = ab5500_vibra_eol_check(vibra);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "EOL check failed\n");
+ }
+
+ dev_info(&pdev->dev, "initialization success\n");
+
+ return ret;
+
+err_alloc:
+ kfree(vibra);
+
+ return ret;
+}
+
+static int __devexit ab5500_vibra_remove(struct platform_device *pdev)
+{
+ struct ab5500_vibra *vibra = platform_get_drvdata(pdev);
+
+ timed_output_dev_unregister(&vibra->tdev);
+ destroy_workqueue(vibra->vibra_workqueue);
+ kfree(vibra);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ab5500_vibra_driver = {
+ .probe = ab5500_vibra_probe,
+ .remove = __devexit_p(ab5500_vibra_remove),
+ .driver = {
+ .name = AB5500_VIBRA_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ab5500_vibra_module_init(void)
+{
+ return platform_driver_register(&ab5500_vibra_driver);
+}
+
+static void __exit ab5500_vibra_module_exit(void)
+{
+ platform_driver_unregister(&ab5500_vibra_driver);
+}
+
+module_init(ab5500_vibra_module_init);
+module_exit(ab5500_vibra_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>");
+MODULE_DESCRIPTION("Timed Output Driver for AB5500 Vibrator");
+
diff --git a/drivers/staging/android/ste_timed_vibra.c b/drivers/staging/android/ste_timed_vibra.c
new file mode 100644
index 00000000000..4621b2fb441
--- /dev/null
+++ b/drivers/staging/android/ste_timed_vibra.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com>
+ * for ST-Ericsson
+ * License Terms: GNU General Public License v2
+ */
+
+#include <linux/kernel.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/ste_timed_vibra.h>
+#include <linux/delay.h>
+#include "timed_output.h"
+
+/**
+ * struct vibra_info - Vibrator information structure
+ * @tdev: Pointer to timed output device structure
+ * @linear_workqueue: Pointer to linear vibrator workqueue structure
+ * @linear_work: Linear Vibrator work
+ * @linear_tick: Linear Vibrator high resolution timer
+ * @vibra_workqueue: Pointer to vibrator workqueue structure
+ * @vibra_work: Vibrator work
+ * @vibra_timer: Vibrator high resolution timer
+ * @vibra_lock: Vibrator lock
+ * @vibra_state: Actual vibrator state
+ * @state_force: Indicates if the opposite state is requested
+ * @timeout: Indicates how long the vibrator will be enabled
+ * @time_passed: Total time passed in states
+ * @pdata: Local pointer to platform data with vibrator parameters
+ *
+ * Structure vibra_info holds vibrator information
+ **/
+struct vibra_info {
+ struct timed_output_dev tdev;
+ struct workqueue_struct *linear_workqueue;
+ struct work_struct linear_work;
+ struct hrtimer linear_tick;
+ struct workqueue_struct *vibra_workqueue;
+ struct work_struct vibra_work;
+ struct hrtimer vibra_timer;
+ spinlock_t vibra_lock;
+ enum ste_timed_vibra_states vibra_state;
+ bool state_force;
+ unsigned int timeout;
+ unsigned int time_passed;
+ struct ste_timed_vibra_platform_data *pdata;
+};
+
+/*
+ * Linear vibrator hardware operates on a particular resonance
+ * frequency. The resonance frequency (f) may also vary with h/w.
+ * This define is the half period (t) in microseconds (us).
+ * For resonance frequency f = 150 Hz
+ * t = T/2 = ((1/150) / 2) = 3333 usec.
+ */
+#define LINEAR_RESONANCE 3333
+
+/**
+ * linear_vibra_work() - Linear Vibrator work, turns on/off vibrator
+ * @work: Pointer to work structure
+ *
+ * This function is called from the workqueue; it turns the vibrator on/off.
+ **/
+static void linear_vibra_work(struct work_struct *work)
+{
+ struct vibra_info *vinfo =
+ container_of(work, struct vibra_info, linear_work);
+ unsigned char speed_pos = 0, speed_neg = 0;
+ ktime_t ktime;
+ static unsigned char toggle;
+
+ if (toggle) {
+ speed_pos = vinfo->pdata->boost_level;
+ speed_neg = 0;
+ } else {
+ speed_neg = vinfo->pdata->boost_level;
+ speed_pos = 0;
+ }
+
+ toggle = !toggle;
+ vinfo->pdata->timed_vibra_control(speed_pos, speed_neg,
+ speed_pos, speed_neg);
+
+ if ((vinfo->vibra_state != STE_VIBRA_IDLE) &&
+ (vinfo->vibra_state != STE_VIBRA_OFF)) {
+ ktime = ktime_set((LINEAR_RESONANCE / USEC_PER_SEC),
+ (LINEAR_RESONANCE % USEC_PER_SEC) * NSEC_PER_USEC);
+ hrtimer_start(&vinfo->linear_tick, ktime, HRTIMER_MODE_REL);
+ }
+}
+
+/**
+ * vibra_control_work() - Vibrator work, turns on/off vibrator
+ * @work: Pointer to work structure
+ *
+ * This function is called from the workqueue; it turns the vibrator on/off.
+ **/
+static void vibra_control_work(struct work_struct *work)
+{
+ struct vibra_info *vinfo =
+ container_of(work, struct vibra_info, vibra_work);
+ unsigned val = 0;
+ unsigned char speed_pos = 0, speed_neg = 0;
+ unsigned long flags;
+
+ /*
+ * Cancel scheduled timer if it has not started
+ * else it will wait for timer callback to complete.
+ * It should be done before taking vibra_lock to
+ * prevent race condition, as timer callback also
+ * takes same lock.
+ */
+ hrtimer_cancel(&vinfo->vibra_timer);
+
+ spin_lock_irqsave(&vinfo->vibra_lock, flags);
+
+ switch (vinfo->vibra_state) {
+ case STE_VIBRA_BOOST:
+ /* Turn on both vibrators with boost speed */
+ speed_pos = vinfo->pdata->boost_level;
+ val = vinfo->pdata->boost_time;
+ break;
+ case STE_VIBRA_ON:
+ /* Turn on both vibrators with speed */
+ speed_pos = vinfo->pdata->on_level;
+ val = vinfo->timeout - vinfo->pdata->boost_time;
+ break;
+ case STE_VIBRA_OFF:
+ /* Turn on both vibrators with reversed speed */
+ speed_neg = vinfo->pdata->off_level;
+ val = vinfo->pdata->off_time;
+ break;
+ case STE_VIBRA_IDLE:
+ vinfo->time_passed = 0;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&vinfo->vibra_lock, flags);
+
+ /* Send new settings (only for rotary vibrators) */
+ if (!vinfo->pdata->is_linear_vibra)
+ vinfo->pdata->timed_vibra_control(speed_pos, speed_neg,
+ speed_pos, speed_neg);
+
+ if (vinfo->vibra_state != STE_VIBRA_IDLE) {
+ /* Start timer if it's not in IDLE state */
+ ktime_t ktime;
+ ktime = ktime_set((val / MSEC_PER_SEC),
+ (val % MSEC_PER_SEC) * NSEC_PER_MSEC);
+ hrtimer_start(&vinfo->vibra_timer, ktime, HRTIMER_MODE_REL);
+ } else if (vinfo->pdata->is_linear_vibra) {
+ /* Cancel work and timers of linear vibrator in IDLE state */
+ hrtimer_cancel(&vinfo->linear_tick);
+ flush_workqueue(vinfo->linear_workqueue);
+ vinfo->pdata->timed_vibra_control(0, 0, 0, 0);
+ }
+}
+
+/**
+ * vibra_enable() - Enables vibrator
+ * @tdev: Pointer to timed output device structure
+ * @timeout: Time indicating how long vibrator will be enabled
+ *
+ * This function enables the vibrator.
+ **/
+static void vibra_enable(struct timed_output_dev *tdev, int timeout)
+{
+ struct vibra_info *vinfo = dev_get_drvdata(tdev->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vinfo->vibra_lock, flags);
+ switch (vinfo->vibra_state) {
+ case STE_VIBRA_IDLE:
+ if (timeout)
+ vinfo->vibra_state = STE_VIBRA_BOOST;
+ else /* Already disabled */
+ break;
+
+ vinfo->state_force = false;
+ /* Trim timeout */
+ vinfo->timeout = timeout < vinfo->pdata->boost_time ?
+ vinfo->pdata->boost_time : timeout;
+
+ if (vinfo->pdata->is_linear_vibra)
+ queue_work(vinfo->linear_workqueue,
+ &vinfo->linear_work);
+ queue_work(vinfo->vibra_workqueue, &vinfo->vibra_work);
+ break;
+ case STE_VIBRA_BOOST:
+ /* Force only when user requested OFF while BOOST */
+ if (!timeout)
+ vinfo->state_force = true;
+ break;
+ case STE_VIBRA_ON:
+ /* If user requested OFF */
+ if (!timeout) {
+ if (vinfo->pdata->is_linear_vibra)
+ hrtimer_cancel(&vinfo->linear_tick);
+ /* Cancel the timer if it has not expired yet.
+ * Otherwise, setting vibra_state to STE_VIBRA_OFF
+ * ensures that the vibrator moves to STE_VIBRA_IDLE
+ * in the timer callback just after this function
+ * call.
+ */
+ hrtimer_try_to_cancel(&vinfo->vibra_timer);
+ vinfo->vibra_state = STE_VIBRA_OFF;
+ queue_work(vinfo->vibra_workqueue, &vinfo->vibra_work);
+ }
+ break;
+ case STE_VIBRA_OFF:
+ /* Force only when user requested ON while OFF */
+ if (timeout)
+ vinfo->state_force = true;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&vinfo->vibra_lock, flags);
+}
+
+/**
+ * linear_vibra_tick() - Generate resonance frequency waveform
+ * @hrtimer: Pointer to high resolution timer structure
+ *
+ * This function helps in generating the resonance frequency
+ * waveform required for linear vibrators
+ *
+ * Returns:
+ * Returns value which indicates whether hrtimer should be restarted
+ **/
+static enum hrtimer_restart linear_vibra_tick(struct hrtimer *hrtimer)
+{
+ struct vibra_info *vinfo =
+ container_of(hrtimer, struct vibra_info, linear_tick);
+
+ if ((vinfo->vibra_state != STE_VIBRA_IDLE) &&
+ (vinfo->vibra_state != STE_VIBRA_OFF)) {
+ queue_work(vinfo->linear_workqueue, &vinfo->linear_work);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * vibra_timer_expired() - Handles vibrator machine state
+ * @hrtimer: Pointer to high resolution timer structure
+ *
+ * This function handles vibrator machine state
+ *
+ * Returns:
+ * Returns value which indicates whether hrtimer should be restarted
+ **/
+static enum hrtimer_restart vibra_timer_expired(struct hrtimer *hrtimer)
+{
+ struct vibra_info *vinfo =
+ container_of(hrtimer, struct vibra_info, vibra_timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vinfo->vibra_lock, flags);
+ switch (vinfo->vibra_state) {
+ case STE_VIBRA_BOOST:
+ /* If BOOST finished and force, go to OFF */
+ if (vinfo->state_force)
+ vinfo->vibra_state = STE_VIBRA_OFF;
+ else
+ vinfo->vibra_state = STE_VIBRA_ON;
+ vinfo->time_passed = vinfo->pdata->boost_time;
+ break;
+ case STE_VIBRA_ON:
+ vinfo->vibra_state = STE_VIBRA_OFF;
+ vinfo->time_passed = vinfo->timeout;
+ break;
+ case STE_VIBRA_OFF:
+ /* If OFF finished and force, go to ON */
+ if (vinfo->state_force)
+ vinfo->vibra_state = STE_VIBRA_ON;
+ else
+ vinfo->vibra_state = STE_VIBRA_IDLE;
+ vinfo->time_passed += vinfo->pdata->off_time;
+ break;
+ case STE_VIBRA_IDLE:
+ break;
+ default:
+ break;
+ }
+ vinfo->state_force = false;
+ spin_unlock_irqrestore(&vinfo->vibra_lock, flags);
+
+ queue_work(vinfo->vibra_workqueue, &vinfo->vibra_work);
+
+ return HRTIMER_NORESTART;
+}
+
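+/*
+ * State machine summary (derived from vibra_enable() and the timer
+ * callback above):
+ *
+ *	IDLE -enable(t)-> BOOST -timer-> ON -timer-> OFF -timer-> IDLE
+ *
+ * with two forced shortcuts via state_force: an OFF request during
+ * BOOST makes the timer jump BOOST -> OFF, and an ON request during
+ * OFF makes it jump OFF -> ON.
+ */
+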
+/**
+ * vibra_get_time() - Returns remaining time to disabling vibration
+ * @tdev: Pointer to timed output device structure
+ *
+ * This function returns the time remaining until vibration is disabled.
+ *
+ * Returns:
+ * remaining time (in ms) until vibration is disabled
+ **/
+static int vibra_get_time(struct timed_output_dev *tdev)
+{
+ struct vibra_info *vinfo = dev_get_drvdata(tdev->dev);
+ u32 ms;
+
+ if (hrtimer_active(&vinfo->vibra_timer)) {
+ ktime_t remain = hrtimer_get_remaining(&vinfo->vibra_timer);
+ ms = (u32) ktime_to_ms(remain);
+ return ms + vinfo->time_passed;
+ } else
+ return 0;
+}
+
+static int __devinit ste_timed_vibra_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct vibra_info *vinfo;
+
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ return -ENODEV;
+ }
+
+ vinfo = kmalloc(sizeof *vinfo, GFP_KERNEL);
+ if (!vinfo) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ vinfo->tdev.name = "vibrator";
+ vinfo->tdev.enable = vibra_enable;
+ vinfo->tdev.get_time = vibra_get_time;
+ vinfo->time_passed = 0;
+ vinfo->vibra_state = STE_VIBRA_IDLE;
+ vinfo->state_force = false;
+ vinfo->pdata = pdev->dev.platform_data;
+
+ if (vinfo->pdata->is_linear_vibra)
+ dev_info(&pdev->dev, "Linear Type Vibrators\n");
+ else
+ dev_info(&pdev->dev, "Rotary Type Vibrators\n");
+
+ ret = timed_output_dev_register(&vinfo->tdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register timed output device\n");
+ goto exit_free_vinfo;
+ }
+
+ dev_set_drvdata(vinfo->tdev.dev, vinfo);
+
+ vinfo->linear_workqueue =
+ create_singlethread_workqueue("ste-timed-linear-vibra");
+ if (!vinfo->linear_workqueue) {
+ dev_err(&pdev->dev, "failed to allocate workqueue\n");
+ ret = -ENOMEM;
+ goto exit_timed_output_unregister;
+ }
+
+ /* Create workqueue just for timed output vibrator */
+ vinfo->vibra_workqueue =
+ create_singlethread_workqueue("ste-timed-output-vibra");
+ if (!vinfo->vibra_workqueue) {
+ dev_err(&pdev->dev, "failed to allocate workqueue\n");
+ ret = -ENOMEM;
+ goto exit_destroy_workqueue;
+ }
+
+ INIT_WORK(&vinfo->linear_work, linear_vibra_work);
+ INIT_WORK(&vinfo->vibra_work, vibra_control_work);
+ spin_lock_init(&vinfo->vibra_lock);
+ hrtimer_init(&vinfo->linear_tick, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&vinfo->vibra_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ vinfo->linear_tick.function = linear_vibra_tick;
+ vinfo->vibra_timer.function = vibra_timer_expired;
+
+ platform_set_drvdata(pdev, vinfo);
+ return 0;
+
+exit_destroy_workqueue:
+ destroy_workqueue(vinfo->linear_workqueue);
+exit_timed_output_unregister:
+ timed_output_dev_unregister(&vinfo->tdev);
+exit_free_vinfo:
+ kfree(vinfo);
+ return ret;
+}
+
+static int __devexit ste_timed_vibra_remove(struct platform_device *pdev)
+{
+ struct vibra_info *vinfo = platform_get_drvdata(pdev);
+
+ timed_output_dev_unregister(&vinfo->tdev);
+ destroy_workqueue(vinfo->linear_workqueue);
+ destroy_workqueue(vinfo->vibra_workqueue);
+ kfree(vinfo);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ste_timed_vibra_driver = {
+ .driver = {
+ .name = "ste_timed_output_vibra",
+ .owner = THIS_MODULE,
+ },
+ .probe = ste_timed_vibra_probe,
+ .remove = __devexit_p(ste_timed_vibra_remove)
+};
+
+static int __init ste_timed_vibra_init(void)
+{
+ return platform_driver_register(&ste_timed_vibra_driver);
+}
+module_init(ste_timed_vibra_init);
+
+static void __exit ste_timed_vibra_exit(void)
+{
+ platform_driver_unregister(&ste_timed_vibra_driver);
+}
+module_exit(ste_timed_vibra_exit);
+
+MODULE_AUTHOR("Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com>");
+MODULE_DESCRIPTION("STE Timed Output Vibrator");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c b/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c
index a272e488e5b..545e03d31fc 100644
--- a/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c
+++ b/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c
@@ -20,8 +20,9 @@
static struct synaptics_rmi4_platform_data rmi4_i2c_dev_platformdata = {
.irq_number = NOMADIK_GPIO_TO_IRQ(84),
.irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED),
- .x_flip = false,
- .y_flip = true,
+ .x_flip = true,
+ .y_flip = false,
+ .regulator_en = true,
};
struct i2c_board_info __initdata mop500_i2c3_devices_u8500[] = {
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 11728a03f8a..fd7fed743f7 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -1,11 +1,10 @@
-/**
- *
+/*
* Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
* Copyright (c) 2007-2010, Synaptics Incorporated
*
* Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- * Copyright 2010 (c) ST-Ericsson AB
+ * Copyright 2010 (c) ST-Ericsson SA
*/
/*
* This file is licensed under the GPL2 license.
@@ -27,6 +26,7 @@
#include <linux/input.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
@@ -36,8 +36,10 @@
/* TODO: for multiple device support will need a per-device mutex */
#define DRIVER_NAME "synaptics_rmi4_i2c"
+#define DELTA 8
#define MAX_ERROR_REPORT 6
-#define MAX_TOUCH_MAJOR 15
+#define TIMEOUT_PERIOD 1
+#define MAX_WIDTH_MAJOR 255
#define MAX_RETRY_COUNT 5
#define STD_QUERY_LEN 21
#define PAGE_LEN 2
@@ -45,6 +47,7 @@
#define BUF_LEN 37
#define QUERY_LEN 9
#define DATA_LEN 12
+#define RESUME_DELAY 100 /* msecs */
#define HAS_TAP 0x01
#define HAS_PALMDETECT 0x01
#define HAS_ROTATE 0x02
@@ -164,6 +167,8 @@ struct synaptics_rmi4_device_info {
* @regulator: pointer to the regulator structure
* @wait: wait queue structure variable
* @touch_stopped: flag to stop the thread function
+ * @enable: flag to enable/disable the driver event.
+ * @resume_wq_handler: work struct used to resume the device
*
* This structure gives the device data information.
*/
@@ -184,6 +189,8 @@ struct synaptics_rmi4_data {
struct regulator *regulator;
wait_queue_head_t wait;
bool touch_stopped;
+ bool enable;
+ struct work_struct resume_wq_handler;
};
/**
@@ -291,6 +298,133 @@ exit:
}
/**
+ * synaptics_rmi4_enable() - enable the touchpad driver event
+ * @pdata: pointer to synaptics_rmi4_data structure
+ *
+ * This function enables touchpad driver events. It returns zero on
+ * success or a negative error code.
+ */
+static int synaptics_rmi4_enable(struct synaptics_rmi4_data *pdata)
+{
+ int retval;
+ unsigned char intr_status;
+
+ if (pdata->board->regulator_en)
+ regulator_enable(pdata->regulator);
+ enable_irq(pdata->board->irq_number);
+ pdata->touch_stopped = false;
+
+ msleep(RESUME_DELAY);
+ retval = synaptics_rmi4_i2c_block_read(pdata,
+ pdata->fn01_data_base_addr + 1,
+ &intr_status,
+ pdata->number_of_interrupt_register);
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_i2c_byte_write(pdata,
+ pdata->fn01_ctrl_base_addr + 1,
+ (intr_status | TOUCHPAD_CTRL_INTR));
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+/**
+ * synaptics_rmi4_disable() - disable the touchpad driver event
+ * @pdata: pointer to synaptics_rmi4_data structure
+ *
+ * This function disables touchpad driver events. It returns zero on
+ * success or a negative error code.
+ */
+
+static int synaptics_rmi4_disable(struct synaptics_rmi4_data *pdata)
+{
+ int retval;
+ unsigned char intr_status;
+
+ pdata->touch_stopped = true;
+ disable_irq(pdata->board->irq_number);
+
+ retval = synaptics_rmi4_i2c_block_read(pdata,
+ pdata->fn01_data_base_addr + 1,
+ &intr_status,
+ pdata->number_of_interrupt_register);
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_i2c_byte_write(pdata,
+ pdata->fn01_ctrl_base_addr + 1,
+ (intr_status & ~TOUCHPAD_CTRL_INTR));
+ if (retval < 0)
+ return retval;
+ if (pdata->board->regulator_en)
+ regulator_disable(pdata->regulator);
+
+ return 0;
+}
+
+/**
+ * synaptics_rmi4_show_attr_enable() - show the touchpad enable value
+ * @dev: pointer to device data structure
+ * @attr: pointer to attribute structure
+ * @buf: pointer to character buffer
+ *
+ * This function shows the touchpad enable value.
+ */
+static ssize_t synaptics_rmi4_show_attr_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *pdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", pdata->enable);
+}
+
+/**
+ * synaptics_rmi4_store_attr_enable() - store the touchpad enable value
+ * @dev: pointer to device data structure
+ * @attr: pointer to attribute structure
+ * @buf: pointer to character buffer
+ * @count: number of bytes in the buffer
+ *
+ * This function stores the touchpad enable value.
+ */
+static ssize_t synaptics_rmi4_store_attr_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct synaptics_rmi4_data *pdata = dev_get_drvdata(dev);
+ unsigned long val;
+ int retval = 0;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (pdata->enable != val) {
+ pdata->enable = val ? true : false;
+ if (pdata->enable)
+ retval = synaptics_rmi4_enable(pdata);
+ else
+ retval = synaptics_rmi4_disable(pdata);
+ }
+ return ((retval < 0) ? retval : count);
+}
+
+static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO,
+ synaptics_rmi4_show_attr_enable, synaptics_rmi4_store_attr_enable);
+
+static struct attribute *synaptics_rmi4_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+
+static struct attribute_group synaptics_rmi4_attr_group = {
+ .attrs = synaptics_rmi4_attrs,
+};
+
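+/*
+ * Usage sketch (the sysfs path depends on the I2C bus and address;
+ * a 3-0004 device is assumed here):
+ *
+ *	echo 0 > /sys/bus/i2c/devices/3-0004/enable	  disable events
+ *	echo 1 > /sys/bus/i2c/devices/3-0004/enable	  re-enable
+ */
+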
+/**
* synpatics_rmi4_touchpad_report() - reports for the rmi4 touchpad device
* @pdata: pointer to synaptics_rmi4_data structure
* @rfi: pointer to synaptics_rmi4_fn structure
@@ -316,8 +450,9 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
unsigned char data[DATA_LEN];
int x[RMI4_NUMBER_OF_MAX_FINGERS];
int y[RMI4_NUMBER_OF_MAX_FINGERS];
- int wx[RMI4_NUMBER_OF_MAX_FINGERS];
- int wy[RMI4_NUMBER_OF_MAX_FINGERS];
+ int w[RMI4_NUMBER_OF_MAX_FINGERS];
+ static int prv_x[RMI4_NUMBER_OF_MAX_FINGERS];
+ static int prv_y[RMI4_NUMBER_OF_MAX_FINGERS];
struct i2c_client *client = pdata->i2c_client;
/* get 2D sensor finger data */
@@ -376,11 +511,7 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
y[touch_count] =
(data[1] << 4) |
((data[2] >> 4) & MASK_4BIT);
- wy[touch_count] =
- (data[3] >> 4) & MASK_4BIT;
- wx[touch_count] =
- (data[3] & MASK_4BIT);
-
+ w[touch_count] = data[3];
if (pdata->board->x_flip)
x[touch_count] =
pdata->sensor_max_x -
@@ -389,6 +520,25 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
y[touch_count] =
pdata->sensor_max_y -
y[touch_count];
+ if (x[touch_count] < 0)
+ x[touch_count] = 0;
+ else if (x[touch_count] >= pdata->sensor_max_x)
+ x[touch_count] =
+ pdata->sensor_max_x - 1;
+
+ if (y[touch_count] < 0)
+ y[touch_count] = 0;
+ else if (y[touch_count] >= pdata->sensor_max_y)
+ y[touch_count] =
+ pdata->sensor_max_y - 1;
+ }
+ if ((abs(x[finger] - prv_x[finger]) < DELTA) &&
+ (abs(y[finger] - prv_y[finger]) < DELTA)) {
+ x[finger] = prv_x[finger];
+ y[finger] = prv_y[finger];
+ } else {
+ prv_x[finger] = x[finger];
+ prv_y[finger] = y[finger];
}
/* number of active touch points */
touch_count++;
@@ -399,7 +549,9 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
if (touch_count) {
for (finger = 0; finger < touch_count; finger++) {
input_report_abs(pdata->input_dev, ABS_MT_TOUCH_MAJOR,
- max(wx[finger] , wy[finger]));
+ max(x[finger], y[finger]));
+ input_report_abs(pdata->input_dev, ABS_MT_WIDTH_MAJOR,
+ w[finger]);
input_report_abs(pdata->input_dev, ABS_MT_POSITION_X,
x[finger]);
input_report_abs(pdata->input_dev, ABS_MT_POSITION_Y,
@@ -502,7 +654,7 @@ static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
touch_count = synaptics_rmi4_sensor_report(pdata);
if (touch_count)
wait_event_timeout(pdata->wait, pdata->touch_stopped,
- msecs_to_jiffies(1));
+ msecs_to_jiffies(TIMEOUT_PERIOD));
else
break;
} while (!pdata->touch_stopped);
@@ -881,9 +1033,27 @@ static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata)
}
/**
+ * synaptics_rmi4_resume_handler() - work queue for resume handler
+ * @work: work_struct structure pointer
+ *
+ * This work queue handler is used to resume the device.
+ */
+static void synaptics_rmi4_resume_handler(struct work_struct *work)
+{
+ struct synaptics_rmi4_data *prmi4_data = container_of(work,
+ struct synaptics_rmi4_data, resume_wq_handler);
+ struct i2c_client *client = prmi4_data->i2c_client;
+ int retval;
+
+ retval = synaptics_rmi4_enable(prmi4_data);
+ if (retval < 0)
+ dev_err(&client->dev, "%s: resume failed\n", __func__);
+}
+
+/**
* synaptics_rmi4_probe() - Initialze the i2c-client touchscreen driver
- * @i2c: i2c client structure pointer
- * @id:i2c device id pointer
+ * @client: i2c client structure pointer
+ * @dev_id:i2c device id pointer
*
* This function will allocate and initialize the instance
* data and request the irq and set the instance data as the clients
@@ -927,19 +1097,17 @@ static int __devinit synaptics_rmi4_probe
goto err_input;
}
- rmi4_data->regulator = regulator_get(&client->dev, "vdd");
- if (IS_ERR(rmi4_data->regulator)) {
- dev_err(&client->dev, "%s:get regulator failed\n",
- __func__);
- retval = PTR_ERR(rmi4_data->regulator);
- goto err_get_regulator;
- }
- retval = regulator_enable(rmi4_data->regulator);
- if (retval < 0) {
- dev_err(&client->dev, "%s:regulator enable failed\n",
- __func__);
- goto err_regulator_enable;
+ if (platformdata->regulator_en) {
+ rmi4_data->regulator = regulator_get(&client->dev, "vdd");
+ if (IS_ERR(rmi4_data->regulator)) {
+ dev_err(&client->dev, "%s:get regulator failed\n",
+ __func__);
+ retval = PTR_ERR(rmi4_data->regulator);
+ goto err_regulator;
+ }
+ regulator_enable(rmi4_data->regulator);
}
+
init_waitqueue_head(&rmi4_data->wait);
/*
* Copy i2c_client pointer into RTID's i2c_client pointer for
@@ -987,7 +1155,16 @@ static int __devinit synaptics_rmi4_probe
input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, 0,
rmi4_data->sensor_max_y, 0, 0);
input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
- MAX_TOUCH_MAJOR, 0, 0);
+ max(rmi4_data->sensor_max_x, rmi4_data->sensor_max_y),
+ 0, 0);
+ input_set_abs_params(rmi4_data->input_dev, ABS_MT_WIDTH_MAJOR, 0,
+ MAX_WIDTH_MAJOR, 0, 0);
+
+ retval = input_register_device(rmi4_data->input_dev);
+ if (retval) {
+ dev_err(&client->dev, "%s:input register failed\n", __func__);
+ goto err_input_register;
+ }
/* Clear interrupts */
synaptics_rmi4_i2c_block_read(rmi4_data,
@@ -1000,24 +1177,34 @@ static int __devinit synaptics_rmi4_probe
if (retval) {
dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
__func__, platformdata->irq_number);
- goto err_query_dev;
+ goto err_request_irq;
}
- retval = input_register_device(rmi4_data->input_dev);
+ INIT_WORK(&rmi4_data->resume_wq_handler, synaptics_rmi4_resume_handler);
+
+ /* sysfs entries to dynamically enable/disable the input event */
+ retval = sysfs_create_group(&client->dev.kobj,
+ &synaptics_rmi4_attr_group);
if (retval) {
- dev_err(&client->dev, "%s:input register failed\n", __func__);
- goto err_free_irq;
+ dev_err(&client->dev, "failed to create sysfs entries\n");
+ goto err_sysfs;
}
-
+ rmi4_data->enable = true;
return retval;
-err_free_irq:
+err_sysfs:
+ cancel_work_sync(&rmi4_data->resume_wq_handler);
+err_request_irq:
free_irq(platformdata->irq_number, rmi4_data);
+ input_unregister_device(rmi4_data->input_dev);
+err_input_register:
+ i2c_set_clientdata(client, NULL);
err_query_dev:
- regulator_disable(rmi4_data->regulator);
-err_regulator_enable:
- regulator_put(rmi4_data->regulator);
-err_get_regulator:
+ if (platformdata->regulator_en) {
+ regulator_disable(rmi4_data->regulator);
+ regulator_put(rmi4_data->regulator);
+ }
+err_regulator:
input_free_device(rmi4_data->input_dev);
rmi4_data->input_dev = NULL;
err_input:
@@ -1037,12 +1224,16 @@ static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
+ sysfs_remove_group(&client->dev.kobj, &synaptics_rmi4_attr_group);
rmi4_data->touch_stopped = true;
wake_up(&rmi4_data->wait);
+ cancel_work_sync(&rmi4_data->resume_wq_handler);
free_irq(pdata->irq_number, rmi4_data);
input_unregister_device(rmi4_data->input_dev);
- regulator_disable(rmi4_data->regulator);
- regulator_put(rmi4_data->regulator);
+ if (pdata->regulator_en) {
+ regulator_disable(rmi4_data->regulator);
+ regulator_put(rmi4_data->regulator);
+ }
kfree(rmi4_data);
return 0;
@@ -1059,31 +1250,11 @@ static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
static int synaptics_rmi4_suspend(struct device *dev)
{
/* Touch sleep mode */
- int retval;
- unsigned char intr_status;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
- const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
- rmi4_data->touch_stopped = true;
- disable_irq(pdata->irq_number);
-
- retval = synaptics_rmi4_i2c_block_read(rmi4_data,
- rmi4_data->fn01_data_base_addr + 1,
- &intr_status,
- rmi4_data->number_of_interrupt_register);
- if (retval < 0)
- return retval;
-
- retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
- rmi4_data->fn01_ctrl_base_addr + 1,
- (intr_status & ~TOUCHPAD_CTRL_INTR));
- if (retval < 0)
- return retval;
-
- regulator_disable(rmi4_data->regulator);
-
- return 0;
+ return synaptics_rmi4_disable(rmi4_data);
}
+
/**
* synaptics_rmi4_resume() - resume the touch screen controller
* @dev: pointer to device structure
@@ -1093,28 +1264,9 @@ static int synaptics_rmi4_suspend(struct device *dev)
*/
static int synaptics_rmi4_resume(struct device *dev)
{
- int retval;
- unsigned char intr_status;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
- const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
-
- regulator_enable(rmi4_data->regulator);
- enable_irq(pdata->irq_number);
- rmi4_data->touch_stopped = false;
-
- retval = synaptics_rmi4_i2c_block_read(rmi4_data,
- rmi4_data->fn01_data_base_addr + 1,
- &intr_status,
- rmi4_data->number_of_interrupt_register);
- if (retval < 0)
- return retval;
-
- retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
- rmi4_data->fn01_ctrl_base_addr + 1,
- (intr_status | TOUCHPAD_CTRL_INTR));
- if (retval < 0)
- return retval;
+ schedule_work(&rmi4_data->resume_wq_handler);
return 0;
}
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
index 384436ef806..973abc97374 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
@@ -42,6 +42,7 @@ struct synaptics_rmi4_platform_data {
int irq_type;
bool x_flip;
bool y_flip;
+ bool regulator_en;
};
#endif
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
new file mode 100644
index 00000000000..a452e888d77
--- /dev/null
+++ b/drivers/tee/Kconfig
@@ -0,0 +1,13 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Martin Hovang (martin.xm.hovang@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+# Trusted Execution Environment Configuration
+config TEE_SUPPORT
+ bool "Trusted Execution Environment Support"
+ default y
+ ---help---
+ This implements the Trusted Execution Environment (TEE) Client
+ API Specification from GlobalPlatform Device Technology.
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
new file mode 100644
index 00000000000..b937eb19d72
--- /dev/null
+++ b/drivers/tee/Makefile
@@ -0,0 +1,8 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Martin Hovang (martin.xm.hovang@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+obj-$(CONFIG_TEE_SUPPORT) += tee_service.o
+obj-$(CONFIG_TEE_SUPPORT) += tee_driver.o
diff --git a/drivers/tee/tee_driver.c b/drivers/tee/tee_driver.c
new file mode 100644
index 00000000000..442dec5fe06
--- /dev/null
+++ b/drivers/tee/tee_driver.c
@@ -0,0 +1,692 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Martin Hovang <martin.xm.hovang@stericsson.com>
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/tee.h>
+#include <linux/slab.h>
+#include <linux/hwmem.h>
+
+#define TEED_NAME "tee"
+#define TEED_PFX "TEE: "
+
+#define TEED_STATE_OPEN_DEV 0
+#define TEED_STATE_OPEN_SESSION 1
+
+static struct mutex sync;
+
+static int tee_open(struct inode *inode, struct file *file);
+static int tee_release(struct inode *inode, struct file *file);
+static int tee_read(struct file *filp, char __user *buffer,
+ size_t length, loff_t *offset);
+static int tee_write(struct file *filp, const char __user *buffer,
+ size_t length, loff_t *offset);
+
+static inline void set_emsg(struct tee_session *ts, u32 msg, int line)
+{
+ pr_err(TEED_PFX "msg: 0x%08x at line: %d\n", msg, line);
+ ts->err = msg;
+ ts->origin = TEED_ORIGIN_DRIVER;
+}
+
+static void reset_session(struct tee_session *ts)
+{
+ int i;
+
+ ts->state = TEED_STATE_OPEN_DEV;
+ ts->err = TEED_SUCCESS;
+ ts->origin = TEED_ORIGIN_DRIVER;
+ ts->id = 0;
+ for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; i++)
+ ts->vaddr[i] = NULL;
+ ts->ta = NULL;
+ ts->uuid = NULL;
+ ts->cmd = 0;
+ ts->driver_cmd = TEED_OPEN_SESSION;
+ ts->ta_size = 0;
+ ts->op = NULL;
+}
+
+static int copy_ta(struct tee_session *ts,
+ struct tee_session *ku_buffer)
+{
+ ts->ta = kmalloc(ku_buffer->ta_size, GFP_KERNEL);
+ if (ts->ta == NULL) {
+ pr_err(TEED_PFX "[%s] error, out of memory (ta)\n",
+ __func__);
+ set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY, __LINE__);
+ return -ENOMEM;
+ }
+
+ ts->ta_size = ku_buffer->ta_size;
+
+ memcpy(ts->ta, ku_buffer->ta, ku_buffer->ta_size);
+ return 0;
+}
+
+static int copy_uuid(struct tee_session *ts,
+ struct tee_session *ku_buffer)
+{
+ ts->uuid = kmalloc(sizeof(struct tee_uuid), GFP_KERNEL);
+
+ if (ts->uuid == NULL) {
+ pr_err(TEED_PFX "[%s] error, out of memory (uuid)\n",
+ __func__);
+ set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY, __LINE__);
+ return -ENOMEM;
+ }
+
+ memcpy(ts->uuid, ku_buffer->uuid, sizeof(struct tee_uuid));
+
+ return 0;
+}
+
+static inline void free_operation(struct tee_session *ts,
+ struct hwmem_alloc **alloc,
+ int memrefs_allocated)
+{
+ int i;
+
+ for (i = 0; i < memrefs_allocated; ++i) {
+ if (ts->op->shm[i].buffer) {
+ hwmem_kunmap(alloc[i]);
+ hwmem_unpin(alloc[i]);
+ hwmem_release(alloc[i]);
+ ts->op->shm[i].buffer = NULL;
+ }
+
+ if (ts->vaddr[i])
+ ts->vaddr[i] = NULL;
+ }
+
+ kfree(ts->op);
+ ts->op = NULL;
+}
+
+static inline void memrefs_phys_to_virt(struct tee_session *ts)
+{
+ int i;
+
+ for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) {
+ if (ts->op->flags & (1 << i)) {
+ ts->op->shm[i].buffer =
+ phys_to_virt((unsigned long)
+ ts->op->shm[i].buffer);
+ }
+ }
+}
+
+static inline void memrefs_virt_to_phys(struct tee_session *ts)
+{
+ int i;
+
+ for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) {
+ if (ts->op->flags & (1 << i)) {
+ ts->op->shm[i].buffer =
+ (void *)virt_to_phys(ts->op->shm[i].buffer);
+ }
+ }
+}
+
+static int copy_memref_to_user(struct tee_session *ts,
+ struct tee_operation __user *ubuf_op,
+ int memref)
+{
+ unsigned long bytes_left;
+
+ bytes_left = copy_to_user(ubuf_op->shm[memref].buffer,
+ ts->vaddr[memref],
+ ts->op->shm[memref].size);
+
+ if (bytes_left != 0) {
+ pr_err(TEED_PFX "[%s] failed to copy result to user space (%lu "
+ "bytes left of buffer).\n", __func__, bytes_left);
+ return bytes_left;
+ }
+
+ bytes_left = put_user(ts->op->shm[memref].size,
+ &ubuf_op->shm[memref].size);
+
+ if (bytes_left != 0) {
+ pr_err(TEED_PFX "[%s] failed to copy result to user space (%lu "
+ "bytes left of size).\n", __func__, bytes_left);
+ return -EINVAL;
+ }
+
+ bytes_left = put_user(ts->op->shm[memref].flags,
+ &ubuf_op->shm[memref].flags);
+ if (bytes_left != 0) {
+ pr_err(TEED_PFX "[%s] failed to copy result to user space (%lu "
+ "bytes left of flags).\n", __func__, bytes_left);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int copy_memref_to_kernel(struct tee_session *ts,
+ struct tee_session *ku_buffer,
+ struct hwmem_alloc **alloc,
+ int memref)
+{
+ int ret = -EINVAL;
+ size_t mem_chunks_length = 1;
+ struct hwmem_mem_chunk mem_chunks;
+
+ if (ku_buffer->op->shm[memref].size == 0) {
+ pr_err(TEED_PFX "[%s] error, size of memref is zero "
+ "(memref: %d)\n", __func__, memref);
+ return ret;
+ }
+
+ alloc[memref] = hwmem_alloc(ku_buffer->op->shm[memref].size,
+ (HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_WB |
+ HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE),
+ (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE |
+ HWMEM_ACCESS_IMPORT),
+ HWMEM_MEM_CONTIGUOUS_SYS);
+
+ if (IS_ERR(alloc[memref])) {
+ pr_err(TEED_PFX "[%s] couldn't alloc hwmem_alloc (memref: %d)"
+ "\n", __func__, memref);
+ return PTR_ERR(alloc[memref]);
+ }
+
+ ret = hwmem_pin(alloc[memref], &mem_chunks, &mem_chunks_length);
+ if (ret) {
+ pr_err(TEED_PFX "[%s] couldn't pin buffer (memref: %d)\n",
+ __func__, memref);
+ return ret;
+ }
+
+ /*
+ * Since phys_to_virt does not work for hwmem memory, we store the
+ * virtual addresses in a separate array in tee_session and keep the
+ * physical addresses in the memref buffers.
+ */
+ ts->op->shm[memref].buffer = (void *)mem_chunks.paddr;
+ ts->vaddr[memref] = hwmem_kmap(alloc[memref]);
+
+ /* Buffer unmapped/freed in invoke_command if this function fails. */
+ if (!ts->op->shm[memref].buffer || !ts->vaddr[memref]) {
+ pr_err(TEED_PFX "[%s] out of memory (memref: %d)\n",
+ __func__, memref);
+ return -ENOMEM;
+ }
+
+ if (ku_buffer->op->shm[memref].flags & TEEC_MEM_INPUT)
+ memcpy(ts->vaddr[memref],
+ ku_buffer->op->shm[memref].buffer,
+ ku_buffer->op->shm[memref].size);
+
+ ts->op->shm[memref].size = ku_buffer->op->shm[memref].size;
+ ts->op->shm[memref].flags = ku_buffer->op->shm[memref].flags;
+
+ return 0;
+}
+
+static int open_tee_device(struct tee_session *ts,
+ struct tee_session *ku_buffer)
+{
+ int ret;
+
+ if (ku_buffer->driver_cmd != TEED_OPEN_SESSION) {
+ set_emsg(ts, TEED_ERROR_BAD_STATE, __LINE__);
+ return -EINVAL;
+ }
+
+ if (ku_buffer->ta) {
+ ret = copy_ta(ts, ku_buffer);
+ } else if (ku_buffer->uuid) {
+ ret = copy_uuid(ts, ku_buffer);
+ } else {
+ set_emsg(ts, TEED_ERROR_COMMUNICATION, __LINE__);
+ return -EINVAL;
+ }
+
+ ts->id = 0;
+ ts->state = TEED_STATE_OPEN_SESSION;
+ return ret;
+}
+
+static int invoke_command(struct tee_session *ts,
+ struct tee_session *ku_buffer,
+ struct tee_session __user *u_buffer)
+{
+ int i;
+ int ret = 0;
+ /* To keep track of which memrefs to free when failure occurs. */
+ int memrefs_allocated = 0;
+ struct hwmem_alloc *alloc[TEEC_CONFIG_PAYLOAD_REF_COUNT];
+
+ ts->op = kmalloc(sizeof(struct tee_operation), GFP_KERNEL);
+
+ if (ts->op == NULL) {
+ pr_err(TEED_PFX "[%s] error, out of memory "
+ "(op)\n", __func__);
+ set_emsg(ts, TEED_ERROR_OUT_OF_MEMORY, __LINE__);
+ return -ENOMEM;
+ }
+
+ ts->op->flags = ku_buffer->op->flags;
+ ts->cmd = ku_buffer->cmd;
+
+ for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) {
+ ts->op->shm[i].buffer = NULL;
+ memrefs_allocated++;
+
+ /* We only want to copy memrefs in use to kernel space. */
+ if (ku_buffer->op->flags & (1 << i)) {
+ ret = copy_memref_to_kernel(ts, ku_buffer, alloc, i);
+ if (ret) {
+ pr_err(TEED_PFX "[%s] failed copy memref[%d] "
+ "to kernel", __func__, i);
+ goto err;
+ }
+ } else {
+ ts->op->shm[i].size = 0;
+ ts->op->shm[i].flags = 0;
+ }
+ }
+
+ if (call_sec_world(ts, TEED_INVOKE)) {
+ set_emsg(ts, TEED_ERROR_COMMUNICATION, __LINE__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < TEEC_CONFIG_PAYLOAD_REF_COUNT; ++i) {
+ if ((ku_buffer->op->flags & (1 << i)) &&
+ (ku_buffer->op->shm[i].flags & TEEC_MEM_OUTPUT)) {
+ ret = copy_memref_to_user(ts, u_buffer->op, i);
+ if (ret) {
+ pr_err(TEED_PFX "[%s] failed copy memref[%d] "
+ "to user", __func__, i);
+ goto err;
+ }
+ }
+ }
+err:
+ free_operation(ts, alloc, memrefs_allocated);
+
+ return ret;
+}
+
+static int tee_open(struct inode *inode, struct file *filp)
+{
+ struct tee_session *ts;
+ filp->private_data = kmalloc(sizeof(struct tee_session),
+ GFP_KERNEL);
+
+ if (filp->private_data == NULL) {
+ pr_err(TEED_PFX "[%s] allocation failed", __func__);
+ return -ENOMEM;
+ }
+
+ ts = (struct tee_session *)(filp->private_data);
+ reset_session(ts);
+
+ return 0;
+}
+
+static int tee_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+/*
+ * Called when a process, which already opened the dev file, attempts
+ * to read from it. This function gets the current status of the session.
+ */
+static int tee_read(struct file *filp, char __user *buffer,
+ size_t length, loff_t *offset)
+{
+ struct tee_read buf;
+ struct tee_session *ts;
+
+ if (length != sizeof(struct tee_read)) {
+ pr_err(TEED_PFX "[%s] error, incorrect input length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ts = (struct tee_session *)(filp->private_data);
+
+ if (ts == NULL) {
+ pr_err(TEED_PFX "[%s] error, private_data not "
+ "initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&sync);
+
+ buf.err = ts->err;
+ buf.origin = ts->origin;
+
+ mutex_unlock(&sync);
+
+ if (copy_to_user(buffer, &buf, length)) {
+ pr_err(TEED_PFX "[%s] error, copy_to_user failed!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return length;
+}
+
+/*
+ * Called when a process writes to a dev file.
+ */
+static int tee_write(struct file *filp, const char __user *buffer,
+ size_t length, loff_t *offset)
+{
+ struct tee_session ku_buffer;
+ struct tee_session *ts;
+ int ret = 0;
+
+ if (length != sizeof(struct tee_session)) {
+ pr_err(TEED_PFX "[%s] error, incorrect input length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ku_buffer, buffer, length)) {
+ pr_err(TEED_PFX "[%s] error, tee_session "
+ "copy_from_user failed\n", __func__);
+ return -EINVAL;
+ }
+
+ ts = (struct tee_session *)(filp->private_data);
+
+ if (ts == NULL) {
+ pr_err(TEED_PFX "[%s] error, private_data not "
+ "initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&sync);
+
+ switch (ts->state) {
+ case TEED_STATE_OPEN_DEV:
+ ret = open_tee_device(ts, &ku_buffer);
+ break;
+
+ case TEED_STATE_OPEN_SESSION:
+ switch (ku_buffer.driver_cmd) {
+ case TEED_INVOKE:
+ ret = invoke_command(ts, &ku_buffer,
+ (struct tee_session *)buffer);
+ break;
+
+ case TEED_CLOSE_SESSION:
+ /* no caching implemented yet... */
+ if (call_sec_world(ts, TEED_CLOSE_SESSION)) {
+ set_emsg(ts, TEED_ERROR_COMMUNICATION,
+ __LINE__);
+ ret = -EINVAL;
+ }
+
+ kfree(ts->ta);
+ ts->ta = NULL;
+
+ reset_session(ts);
+ break;
+
+ default:
+ set_emsg(ts, TEED_ERROR_BAD_PARAMETERS, __LINE__);
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ pr_err(TEED_PFX "[%s] unknown state\n", __func__);
+ set_emsg(ts, TEED_ERROR_BAD_STATE, __LINE__);
+ ret = -EINVAL;
+ }
+
+ /*
+ * We expect that ret has value zero when reaching the end here.
+ * If it has any other value, some error must have occurred.
+ */
+ if (!ret) {
+ ret = length;
+ } else {
+ pr_err(TEED_PFX "[%s], forcing error to -EINVAL\n", __func__);
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&sync);
+
+ return ret;
+}
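
User space drives this driver entirely through write() (the state machine above) and read() (last error plus origin). A hedged client sketch, assuming the tee_session and tee_read layouts from <linux/tee.h> are visible to user space and the misc node appears as /dev/tee (TEED_NAME):

/* Minimal /dev/tee client sketch; struct layouts come from <linux/tee.h>. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/tee.h>

int main(void)
{
	struct tee_session ts;
	struct tee_uuid uuid;		/* target TA identity; placeholder */
	struct tee_read status;
	int fd = open("/dev/tee", O_RDWR);

	if (fd < 0) {
		perror("open /dev/tee");
		return 1;
	}

	memset(&ts, 0, sizeof(ts));
	memset(&uuid, 0, sizeof(uuid));
	ts.driver_cmd = TEED_OPEN_SESSION;	/* first write opens a session */
	ts.uuid = &uuid;			/* driver copies this UUID */

	if (write(fd, &ts, sizeof(ts)) != (ssize_t)sizeof(ts))
		perror("open session");

	/* read() reports the last error code and where it originated. */
	if (read(fd, &status, sizeof(status)) == (ssize_t)sizeof(status))
		fprintf(stderr, "err=0x%08x origin=%u\n",
			(unsigned)status.err, (unsigned)status.origin);

	close(fd);
	return 0;
}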
+
+int teec_initialize_context(const char *name, struct tee_context *context)
+{
+ return TEED_SUCCESS;
+}
+EXPORT_SYMBOL(teec_initialize_context);
+
+int teec_finalize_context(struct tee_context *context)
+{
+ return TEED_SUCCESS;
+}
+EXPORT_SYMBOL(teec_finalize_context);
+
+int teec_open_session(struct tee_context *context,
+ struct tee_session *session,
+ const struct tee_uuid *destination,
+ unsigned int connection_method,
+ void *connection_data, struct tee_operation *operation,
+ unsigned int *error_origin)
+{
+ int res = TEED_SUCCESS;
+
+ if (session == NULL || destination == NULL) {
+ pr_err(TEED_PFX "[%s] session or destination == NULL\n",
+ __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ reset_session(session);
+
+ /*
+ * Open a session towards an application already loaded inside
+ * the TEE.
+ */
+ session->uuid = kmalloc(sizeof(struct tee_uuid), GFP_KERNEL);
+
+ if (session->uuid == NULL) {
+ pr_err(TEED_PFX "[%s] error, out of memory (uuid)\n",
+ __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ memcpy(session->uuid, destination, sizeof(struct tee_uuid));
+
+ session->ta = NULL;
+ session->id = 0;
+
+exit:
+ return res;
+}
+EXPORT_SYMBOL(teec_open_session);
+
+int teec_close_session(struct tee_session *session)
+{
+ int res = TEED_SUCCESS;
+
+ mutex_lock(&sync);
+
+ if (session == NULL) {
+ pr_err(TEED_PFX "[%s] error, session == NULL\n", __func__);
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ if (call_sec_world(session, TEED_CLOSE_SESSION)) {
+ pr_err(TEED_PFX "[%s] error, call_sec_world failed\n",
+ __func__);
+ res = TEED_ERROR_GENERIC;
+ goto exit;
+ }
+
+exit:
+ if (session != NULL) {
+ kfree(session->uuid);
+ session->uuid = NULL;
+ }
+
+ mutex_unlock(&sync);
+ return res;
+}
+EXPORT_SYMBOL(teec_close_session);
+
+int teec_invoke_command(
+ struct tee_session *session, unsigned int command_id,
+ struct tee_operation *operation,
+ unsigned int *error_origin)
+{
+ int res = TEED_SUCCESS;
+ int i;
+
+ mutex_lock(&sync);
+
+ if (session == NULL || operation == NULL || error_origin == NULL) {
+ pr_err(TEED_PFX "[%s] error, input parameters == NULL\n",
+ __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ for (i = 0; i < 4; ++i) {
+ /* We only want to translate memrefs in use. */
+ if (operation->flags & (1 << i)) {
+ operation->shm[i].buffer =
+ (void *)virt_to_phys(
+ operation->shm[i].buffer);
+ }
+ }
+ session->op = operation;
+ session->cmd = command_id;
+
+ /*
+ * Call secure world
+ */
+ if (call_sec_world(session, TEED_INVOKE)) {
+ pr_err(TEED_PFX "[%s] error, call_sec_world failed\n",
+ __func__);
+ if (error_origin != NULL)
+ *error_origin = TEED_ORIGIN_DRIVER;
+ res = TEED_ERROR_GENERIC;
+ }
+ if (session->err != TEED_SUCCESS) {
+ pr_err(TEED_PFX "[%s] error, call_sec_world failed\n",
+ __func__);
+ if (error_origin != NULL)
+ *error_origin = session->origin;
+ res = session->err;
+ }
+
+ memrefs_phys_to_virt(session);
+ session->op = NULL;
+
+exit:
+ mutex_unlock(&sync);
+ return res;
+}
+EXPORT_SYMBOL(teec_invoke_command);
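
The teec_* exports above also give kernel code a direct path to secure world without the character device. A sketch of a kernel-side caller; the zeroed UUID and command ID 0x1 are placeholders, not a real trusted application:

/* Kernel-side TEE client sketch built on the exports above. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee.h>

static int __init tee_client_demo_init(void)
{
	struct tee_context ctx;
	struct tee_session sess;
	struct tee_operation op;
	struct tee_uuid uuid;
	unsigned int origin = 0;
	int ret;

	memset(&uuid, 0, sizeof(uuid));		/* placeholder UUID */
	teec_initialize_context(NULL, &ctx);

	ret = teec_open_session(&ctx, &sess, &uuid, 0, NULL, NULL, &origin);
	if (ret != TEED_SUCCESS)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.flags = 1 << 0;			/* memref 0 in use */
	op.shm[0].buffer = kmalloc(64, GFP_KERNEL);
	op.shm[0].size = 64;
	op.shm[0].flags = TEEC_MEM_INPUT;
	if (!op.shm[0].buffer) {
		teec_close_session(&sess);
		return -ENOMEM;
	}

	ret = teec_invoke_command(&sess, 0x1, &op, &origin);
	pr_info("tee demo: invoke ret=%d origin=%u\n", ret, origin);

	kfree(op.shm[0].buffer);
	teec_close_session(&sess);
	teec_finalize_context(&ctx);
	return 0;
}

static void __exit tee_client_demo_exit(void)
{
}

module_init(tee_client_demo_init);
module_exit(tee_client_demo_exit);
MODULE_LICENSE("GPL");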
+
+int teec_allocate_shared_memory(struct tee_context *context,
+ struct tee_sharedmemory *shared_memory)
+{
+ int res = TEED_SUCCESS;
+
+ if (shared_memory == NULL) {
+ res = TEED_ERROR_BAD_PARAMETERS;
+ goto exit;
+ }
+
+ shared_memory->buffer = kmalloc(shared_memory->size,
+ GFP_KERNEL);
+
+ if (shared_memory->buffer == NULL) {
+ res = TEED_ERROR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+exit:
+ return res;
+}
+EXPORT_SYMBOL(teec_allocate_shared_memory);
+
+void teec_release_shared_memory(struct tee_sharedmemory *shared_memory)
+{
+ kfree(shared_memory->buffer);
+}
+EXPORT_SYMBOL(teec_release_shared_memory);
+
+static const struct file_operations tee_fops = {
+ .owner = THIS_MODULE,
+ .read = tee_read,
+ .write = tee_write,
+ .open = tee_open,
+ .release = tee_release,
+};
+
+static struct miscdevice tee_dev = {
+ MISC_DYNAMIC_MINOR,
+ TEED_NAME,
+ &tee_fops
+};
+
+static int __init tee_init(void)
+{
+ int err = 0;
+
+ err = misc_register(&tee_dev);
+
+ if (err) {
+ pr_err(TEED_PFX "[%s] error %d adding character device "
+ "TEE\n", __func__, err);
+ }
+
+ mutex_init(&sync);
+
+ return err;
+}
+
+static void __exit tee_exit(void)
+{
+ misc_deregister(&tee_dev);
+}
+
+subsys_initcall(tee_init);
+module_exit(tee_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Trusted Execution Environment driver");
diff --git a/drivers/tee/tee_service.c b/drivers/tee/tee_service.c
new file mode 100644
index 00000000000..b01e9d0ac39
--- /dev/null
+++ b/drivers/tee/tee_service.c
@@ -0,0 +1,17 @@
+/*
+ * TEE service to handle the calls to trusted applications.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/kernel.h>
+#include <linux/tee.h>
+#include <linux/device.h>
+
+int __weak call_sec_world(struct tee_session *ts, int sec_cmd)
+{
+ pr_info("[%s] Generic call_sec_world called!\n", __func__);
+
+ return 0;
+}
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 070b442c1f8..b2831ed01bb 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -63,6 +63,14 @@ config SERIAL_AMBA_PL011_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
+config SERIAL_AMBA_PL011_CLOCK_CONTROL
+ bool "Support for clock control on AMBA serial port"
+ depends on SERIAL_AMBA_PL011
+ select CONSOLE_POLL
+ ---help---
+ Say Y here if you wish to use amba set_termios function to control
+ the pl011 clock. Any positive baud rate enables the clock; a baud
+ rate of zero (a B0 hangup) requests the clock off once TX is flushed.
+
config SERIAL_SB1250_DUART
tristate "BCM1xxx on-chip DUART serial support"
depends on SIBYTE_SB1xxx_SOC=y
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 3d569cd68f5..d356b940382 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -47,11 +47,13 @@
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
+#include <linux/pm_runtime.h>
#include <asm/io.h>
#include <asm/sizes.h>
@@ -67,6 +69,37 @@
#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX (1 << 16)
+/*
+ * The console UART is handled differently for power management (it doesn't
+ * take the regulator, in order to allow the system to go to sleep even if the
+ * console is open). This should be removed once cable detect is in place.
+ */
+#ifdef CONFIG_SERIAL_CORE_CONSOLE
+#define uart_console(port) ((port)->cons \
+ && (port)->cons->index == (port)->line)
+#else
+#define uart_console(port) (0)
+#endif
+
+/* Available amba pl011 port clock states */
+enum pl011_clk_states {
+ PL011_CLK_OFF = 0, /* clock disabled */
+ PL011_CLK_REQUEST_OFF, /* disable after TX flushed */
+ PL011_CLK_ON, /* clock enabled */
+ PL011_PORT_OFF, /* port disabled */
+};
+
+/*
+ * Backup registers to be used during regulator startup/shutdown
+ */
+static const u32 backup_regs[] = {
+ UART011_IBRD,
+ UART011_FBRD,
+ ST_UART011_LCRH_RX,
+ ST_UART011_LCRH_TX,
+ UART011_CR,
+ UART011_IMSC,
+};
#define UART_WA_SAVE_NR 14
@@ -89,7 +122,9 @@ static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
};
static u32 uart_wa_regdata[UART_WA_SAVE_NR];
-static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);
+static unsigned int uart_wa_tlet_line;
+static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa,
+ (unsigned long) &uart_wa_tlet_line);
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
@@ -158,10 +193,18 @@ struct uart_amba_port {
unsigned int im; /* interrupt mask */
unsigned int old_status;
unsigned int fifosize; /* vendor-specific */
+ unsigned int ifls; /* vendor-specific */
unsigned int lcrh_tx; /* vendor-specific */
unsigned int lcrh_rx; /* vendor-specific */
unsigned int old_cr; /* state during shutdown */
bool autorts;
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+ enum pl011_clk_states clk_state; /* actual clock state */
+ struct delayed_work clk_off_work; /* work used for clock off */
+ unsigned int clk_off_delay; /* clock off delay */
+#endif
+ struct regulator *regulator;
+ u32 backup[ARRAY_SIZE(backup_regs)];
char type[12];
bool interrupt_may_hang; /* vendor-specific */
#ifdef CONFIG_DMA_ENGINE
@@ -1070,13 +1113,17 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
*/
static void pl011_lockup_wa(unsigned long data)
{
- struct uart_amba_port *uap = amba_ports[0];
+ struct uart_amba_port *uap = amba_ports[*(unsigned int *)data];
void __iomem *base = uap->port.membase;
struct circ_buf *xmit = &uap->port.state->xmit;
struct tty_struct *tty = uap->port.state->port.tty;
int buf_empty_retries = 200;
int loop;
+ /* Exit early if there is no tty */
+ if (!tty)
+ return;
+
/* Stop HCI layer from submitting data for tx */
tty->hw_stopped = 1;
while (!uart_circ_empty(xmit)) {
@@ -1117,6 +1164,260 @@ static void pl011_lockup_wa(unsigned long data)
tty->hw_stopped = 0;
}
+static void __pl011_startup(struct uart_amba_port *uap)
+{
+ unsigned int cr;
+
+ writew(uap->ifls, uap->port.membase + UART011_IFLS);
+
+ /*
+ * Provoke TX FIFO interrupt into asserting.
+ */
+ cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
+ writew(cr, uap->port.membase + UART011_CR);
+ writew(0, uap->port.membase + UART011_FBRD);
+ writew(1, uap->port.membase + UART011_IBRD);
+ writew(0, uap->port.membase + uap->lcrh_rx);
+ if (uap->lcrh_tx != uap->lcrh_rx) {
+ int i;
+ /*
+ * Wait 10 PCLKs before writing LCRH_TX register,
+ * to get this delay write read only register 10 times
+ */
+ for (i = 0; i < 10; ++i)
+ writew(0xff, uap->port.membase + UART011_MIS);
+ writew(0, uap->port.membase + uap->lcrh_tx);
+ }
+ writew(0, uap->port.membase + UART01x_DR);
+ while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
+ barrier();
+}
+
+/* Backup the registers during regulator startup/shutdown */
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+static int pl011_backup(struct uart_amba_port *uap, bool suspend)
+{
+ int i, cnt;
+
+ if (!suspend) {
+ __pl011_startup(uap);
+ writew(0, uap->port.membase + UART011_CR);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(backup_regs); i++) {
+ if (suspend)
+ uap->backup[i] = readw(uap->port.membase +
+ backup_regs[i]);
+ else {
+ if (backup_regs[i] == ST_UART011_LCRH_TX) {
+ /*
+ * Wait 10 PCLKs before writing LCRH_TX
+ * register, to get this delay write read
+ * only register 10 times
+ */
+ for (cnt = 0; cnt < 10; ++cnt)
+ writew(0xff, uap->port.membase +
+ UART011_MIS);
+ }
+
+ writew(uap->backup[i],
+ uap->port.membase + backup_regs[i]);
+ }
+ }
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+/* Turn clock off if TX buffer is empty, otherwise reschedule */
+static void pl011_clock_off(struct work_struct *work)
+{
+ struct uart_amba_port *uap = container_of(work, struct uart_amba_port,
+ clk_off_work.work);
+ struct uart_port *port = &uap->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long flags;
+ bool disable_regulator = false;
+ bool runtime_put = false;
+ unsigned int busy, interrupt_status;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ interrupt_status = readw(uap->port.membase + UART011_MIS);
+ busy = readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY;
+
+ if (uap->clk_state == PL011_CLK_REQUEST_OFF) {
+ if (uart_circ_empty(xmit) && !interrupt_status && !busy) {
+ if (!uart_console(&uap->port) && uap->regulator) {
+ pl011_backup(uap, true);
+ disable_regulator = true;
+ }
+ runtime_put = true;
+ uap->clk_state = PL011_CLK_OFF;
+ clk_disable(uap->clk);
+ } else
+ schedule_delayed_work(&uap->clk_off_work,
+ uap->clk_off_delay);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (disable_regulator)
+ regulator_disable(uap->regulator);
+ if (runtime_put)
+ pm_runtime_put_sync(uap->port.dev);
+}
+
+/* Request to turn off uart clock once pending TX is flushed */
+static void pl011_clock_request_off(struct uart_port *port)
+{
+ unsigned long flags;
+ struct uart_amba_port *uap = (struct uart_amba_port *)(port);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (uap->clk_state == PL011_CLK_ON) {
+ uap->clk_state = PL011_CLK_REQUEST_OFF;
+ /* Turn off later */
+ schedule_delayed_work(&uap->clk_off_work,
+ uap->clk_off_delay);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* Request to immediately turn on uart clock */
+static void pl011_clock_on(struct uart_port *port)
+{
+ unsigned long flags;
+ struct uart_amba_port *uap = (struct uart_amba_port *)(port);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ switch (uap->clk_state) {
+ case PL011_CLK_OFF:
+ pm_runtime_get_sync(uap->port.dev);
+ clk_enable(uap->clk);
+ if (!uart_console(&uap->port) && uap->regulator) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ regulator_enable(uap->regulator);
+ spin_lock_irqsave(&port->lock, flags);
+ pl011_backup(uap, false);
+ }
+ /* fallthrough */
+ case PL011_CLK_REQUEST_OFF:
+ __cancel_delayed_work(&uap->clk_off_work);
+ uap->clk_state = PL011_CLK_ON;
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void pl011_clock_check(struct uart_amba_port *uap)
+{
+ /* Reschedule work during an off request */
+ if (uap->clk_state == PL011_CLK_REQUEST_OFF)
+ /* New TX - restart work */
+ if (__cancel_delayed_work(&uap->clk_off_work))
+ schedule_delayed_work(&uap->clk_off_work,
+ uap->clk_off_delay);
+}
+
+static int pl011_power_startup(struct uart_amba_port *uap)
+{
+ int retval = 0;
+
+ if (uap->clk_state == PL011_PORT_OFF) {
+ pm_runtime_get_sync(uap->port.dev);
+ if (!uart_console(&uap->port) && uap->regulator)
+ regulator_enable(uap->regulator);
+ retval = clk_enable(uap->clk);
+ if (!retval) {
+ uap->clk_state = PL011_CLK_ON;
+ } else {
+ uap->clk_state = PL011_PORT_OFF;
+ pm_runtime_put_sync(uap->port.dev);
+ }
+ }
+
+ return retval;
+}
+
+static void pl011_power_shutdown(struct uart_amba_port *uap)
+{
+ bool disable_regulator = false;
+ bool runtime_put = false;
+
+ cancel_delayed_work_sync(&uap->clk_off_work);
+
+ spin_lock_irq(&uap->port.lock);
+ if (uap->clk_state == PL011_CLK_ON ||
+ uap->clk_state == PL011_CLK_REQUEST_OFF) {
+ clk_disable(uap->clk);
+ runtime_put = true;
+ if (!uart_console(&uap->port) && uap->regulator)
+ disable_regulator = true;
+ }
+ uap->clk_state = PL011_PORT_OFF;
+ spin_unlock_irq(&uap->port.lock);
+
+ if (disable_regulator)
+ regulator_disable(uap->regulator);
+ if (runtime_put)
+ pm_runtime_put_sync(uap->port.dev);
+}
+
+static void
+pl011_clock_control(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ speed_t new_baud = tty_termios_baud_rate(termios);
+
+ if (new_baud == 0)
+ pl011_clock_request_off(port);
+ else
+ pl011_clock_on(port);
+}
+
+static void pl011_clock_control_init(struct uart_amba_port *uap)
+{
+ uap->clk_state = PL011_PORT_OFF;
+ INIT_DELAYED_WORK(&uap->clk_off_work, pl011_clock_off);
+ uap->clk_off_delay = HZ / 10; /* 100 ms */
+}
+
+#else
+/* Blank functions for clock control */
+static inline void pl011_clock_check(struct uart_amba_port *uap)
+{
+}
+
+static inline int pl011_power_startup(struct uart_amba_port *uap)
+{
+ pm_runtime_get_sync(uap->port.dev);
+ return clk_enable(uap->clk);
+}
+
+static inline void pl011_power_shutdown(struct uart_amba_port *uap)
+{
+ clk_disable(uap->clk);
+ pm_runtime_put_sync(uap->port.dev);
+}
+
+static inline void
+pl011_clock_control(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+}
+
+static inline void pl011_clock_control_init(struct uart_amba_port *uap)
+{
+}
+#endif
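
With CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL set, the UART clock is gated through the ordinary termios path: any non-zero baud rate switches the clock on, and a zero rate (B0 hangup) requests it off once TX drains. A user-space sketch, assuming the port shows up as /dev/ttyAMA0:

/* Gate the pl011 clock from user space via termios. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <termios.h>

int main(void)
{
	struct termios tio;
	int fd = open("/dev/ttyAMA0", O_RDWR | O_NOCTTY);

	if (fd < 0 || tcgetattr(fd, &tio)) {
		perror("ttyAMA0");
		return 1;
	}

	cfsetospeed(&tio, B115200);	/* non-zero baud: clock on */
	tcsetattr(fd, TCSANOW, &tio);

	/* ... serial traffic ... */

	cfsetospeed(&tio, B0);		/* B0 hangup: clock off after TX flush */
	tcsetattr(fd, TCSANOW, &tio);

	close(fd);
	return 0;
}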
+
static void pl011_stop_tx(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1208,6 +1509,9 @@ static void pl011_tx_chars(struct uart_amba_port *uap)
break;
} while (--count > 0);
+ if (count)
+ pl011_clock_check(uap);
+
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
@@ -1253,7 +1557,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
do {
writew(status & ~(UART011_TXIS|UART011_RTIS|
UART011_RXIS),
- uap->port.membase + UART011_ICR);
+ uap->port.membase + UART011_ICR);
if (status & (UART011_RTIS|UART011_RXIS)) {
if (pl011_dma_rx_running(uap))
@@ -1268,8 +1572,10 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
pl011_tx_chars(uap);
if (pass_counter-- == 0) {
- if (uap->interrupt_may_hang)
+ if (uap->interrupt_may_hang) {
+ uart_wa_tlet_line = uap->port.line;
tasklet_schedule(&pl011_lockup_tlet);
+ }
break;
}
@@ -1389,9 +1695,9 @@ static int pl011_startup(struct uart_port *port)
goto out;
/*
- * Try to enable the clock producer.
+ * Try to enable the clock producer and the regulator.
*/
- retval = clk_enable(uap->clk);
+ retval = pl011_power_startup(uap);
if (retval)
goto clk_unprep;
@@ -1408,29 +1714,7 @@ static int pl011_startup(struct uart_port *port)
if (retval)
goto clk_dis;
- writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
-
- /*
- * Provoke TX FIFO interrupt into asserting.
- */
- cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
- writew(cr, uap->port.membase + UART011_CR);
- writew(0, uap->port.membase + UART011_FBRD);
- writew(1, uap->port.membase + UART011_IBRD);
- writew(0, uap->port.membase + uap->lcrh_rx);
- if (uap->lcrh_tx != uap->lcrh_rx) {
- int i;
- /*
- * Wait 10 PCLKs before writing LCRH_TX register,
- * to get this delay write read only register 10 times
- */
- for (i = 0; i < 10; ++i)
- writew(0xff, uap->port.membase + UART011_MIS);
- writew(0, uap->port.membase + uap->lcrh_tx);
- }
- writew(0, uap->port.membase + UART01x_DR);
- while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
- barrier();
+ __pl011_startup(uap);
/* restore RTS and DTR */
cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
@@ -1471,7 +1755,7 @@ static int pl011_startup(struct uart_port *port)
return 0;
clk_dis:
- clk_disable(uap->clk);
+ pl011_power_shutdown(uap);
clk_unprep:
clk_unprepare(uap->clk);
out:
@@ -1529,10 +1813,18 @@ static void pl011_shutdown(struct uart_port *port)
if (uap->lcrh_rx != uap->lcrh_tx)
pl011_shutdown_channel(uap, uap->lcrh_tx);
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->exit)
+ plat->exit();
+ }
+
/*
- * Shut down the clock producer
+ * Shut down the clock producer and the regulator
*/
- clk_disable(uap->clk);
+ pl011_power_shutdown(uap);
clk_unprepare(uap->clk);
if (uap->port.dev->platform_data) {
@@ -1545,6 +1837,32 @@ static void pl011_shutdown(struct uart_port *port)
}
+/* Power/Clock management. */
+static void pl011_serial_pm(struct uart_port *port, unsigned int state,
+unsigned int oldstate)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+
+ switch (state) {
+ case 0: /*fully on */
+ /*
+ * Enable the peripheral clock for this serial port.
+ * This is called on uart_open() or a resume event.
+ */
+ pl011_power_startup(uap);
+ break;
+ case 3: /* powered down */
+ /*
+ * Disable the peripheral clock for this serial port.
+ * This is called on uart_close() or a suspend event.
+ */
+ pl011_power_shutdown(uap);
+ break;
+ default:
+ printk(KERN_ERR "pl011_serial: unknown pm %d\n", state);
+ }
+}
+
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
@@ -1558,7 +1876,12 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
clkdiv = 8;
else
clkdiv = 16;
-
+ /*
+ * Must be called before uart_get_baud_rate(), because that
+ * function replaces a zero baud rate (B0 hangup) with the
+ * default, which would defeat the clock-off request.
+ */
+ pl011_clock_control(port, termios, old);
/*
* Ask the core to calculate the divisor for us.
*/
@@ -1746,14 +2069,13 @@ static struct uart_ops amba_pl011_pops = {
.request_port = pl010_request_port,
.config_port = pl010_config_port,
.verify_port = pl010_verify_port,
+ .pm = pl011_serial_pm,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = pl010_get_poll_char,
.poll_put_char = pl010_put_poll_char,
#endif
};
-static struct uart_amba_port *amba_ports[UART_NR];
-
#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
static void pl011_console_putchar(struct uart_port *port, int ch)
@@ -1897,6 +2219,13 @@ static struct console amba_console = {
.data = &amba_reg,
};
+static int __init pl011_console_init(void)
+{
+ register_console(&amba_console);
+ return 0;
+}
+console_initcall(pl011_console_init);
+
#define AMBA_CONSOLE (&amba_console)
#else
#define AMBA_CONSOLE NULL
@@ -1911,7 +2240,6 @@ static struct uart_driver amba_reg = {
.nr = UART_NR,
.cons = AMBA_CONSOLE,
};
-
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
@@ -1940,6 +2268,12 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
goto free;
}
+ uap->regulator = regulator_get(&dev->dev, "v-uart");
+ if (IS_ERR(uap->regulator)) {
+ dev_warn(&dev->dev, "could not get uart regulator\n");
+ uap->regulator = NULL;
+ }
+
uap->clk = clk_get(&dev->dev, NULL);
if (IS_ERR(uap->clk)) {
ret = PTR_ERR(uap->clk);
@@ -1947,6 +2281,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
}
uap->vendor = vendor;
+ uap->ifls = vendor->ifls;
uap->lcrh_rx = vendor->lcrh_rx;
uap->lcrh_tx = vendor->lcrh_tx;
uap->old_cr = 0;
@@ -1972,18 +2307,30 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
amba_ports[i] = uap;
amba_set_drvdata(dev, uap);
+
+ pm_runtime_irq_safe(&dev->dev);
+
+ pl011_clock_control_init(uap);
+
ret = uart_add_one_port(&amba_reg, &uap->port);
+
+ if (!ret)
+ pm_runtime_put(&dev->dev);
+
if (ret) {
amba_set_drvdata(dev, NULL);
amba_ports[i] = NULL;
pl011_dma_remove(uap);
clk_put(uap->clk);
unmap:
+ if (uap->regulator)
+ regulator_put(uap->regulator);
iounmap(base);
free:
kfree(uap);
}
out:
+
return ret;
}
@@ -1994,6 +2341,8 @@ static int pl011_remove(struct amba_device *dev)
amba_set_drvdata(dev, NULL);
+ pm_runtime_get_sync(uap->port.dev);
+
uart_remove_one_port(&amba_reg, &uap->port);
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
@@ -2002,6 +2351,8 @@ static int pl011_remove(struct amba_device *dev)
pl011_dma_remove(uap);
iounmap(uap->port.membase);
+ if (uap->regulator)
+ regulator_put(uap->regulator);
clk_put(uap->clk);
kfree(uap);
return 0;
@@ -2014,7 +2365,12 @@ static int pl011_suspend(struct amba_device *dev, pm_message_t state)
if (!uap)
return -EINVAL;
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+ cancel_delayed_work_sync(&uap->clk_off_work);
+ if (uap->clk_state == PL011_CLK_OFF)
+ return 0;
+#endif
return uart_suspend_port(&amba_reg, &uap->port);
}
@@ -2024,6 +2380,10 @@ static int pl011_resume(struct amba_device *dev)
if (!uap)
return -EINVAL;
+#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL
+ if (uap->clk_state == PL011_CLK_OFF)
+ return 0;
+#endif
return uart_resume_port(&amba_reg, &uap->port);
}
@@ -2082,7 +2442,7 @@ static void __exit pl011_exit(void)
* While this can be a module, if builtin it's most likely the console
* So let's leave module_exit but move module_init to an earlier place
*/
-arch_initcall(pl011_init);
+subsys_initcall(pl011_init);
module_exit(pl011_exit);
MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 9a56635dc19..b3eeb3e1d60 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1211,6 +1211,19 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
* and flush any outstanding URBs.
*/
} else {
+#ifdef CONFIG_USB_OTG
+ /* According to OTG supplement Rev 2.0 section 6.3
+ * Unless an A-device enables b_hnp_enable before entering
+ * suspend it shall also continue polling while the bus is
+ * suspended.
+ *
+ * We don't have to perform HNP polling, as we are going to
+ * enable b_hnp_enable before suspending.
+ */
+ if (udev->bus->hnp_support &&
+ udev->portnum == udev->bus->otg_port)
+ cancel_delayed_work(&udev->bus->hnp_polling);
+#endif
udev->can_submit = 0;
for (i = 0; i < 16; ++i) {
usb_hcd_flush_endpoint(udev, udev->ep_out[i]);
@@ -1274,6 +1287,44 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
return status;
}
+#ifdef CONFIG_USB_OTG
+void usb_hnp_polling_work(struct work_struct *work)
+{
+ int ret;
+ struct usb_bus *bus =
+ container_of(work, struct usb_bus, hnp_polling.work);
+ struct usb_device *udev = bus->root_hub->children[bus->otg_port - 1];
+ u8 *status = kmalloc(sizeof(*status), GFP_KERNEL);
+
+ if (!status)
+ return;
+
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ USB_REQ_GET_STATUS, USB_DIR_IN | USB_RECIP_DEVICE,
+ 0, OTG_STATUS_SELECTOR, status, sizeof(*status),
+ USB_CTRL_GET_TIMEOUT);
+ if (ret < 0) {
+ /* Peripheral may not be supporting HNP polling */
+ dev_vdbg(&udev->dev, "HNP polling failed. status %d\n", ret);
+ ret = usb_suspend_both(udev, PMSG_USER_SUSPEND);
+ goto out;
+ }
+
+ /* Spec says the host must suspend the bus within 2 sec. */
+ if (*status & (1 << HOST_REQUEST_FLAG)) {
+ unbind_no_pm_drivers_interfaces(udev);
+ ret = usb_suspend_both(udev, PMSG_USER_SUSPEND);
+ if (ret)
+ dev_info(&udev->dev, "suspend failed\n");
+ } else {
+ schedule_delayed_work(&bus->hnp_polling,
+ msecs_to_jiffies(THOST_REQ_POLL));
+ }
+out:
+ kfree(status);
+}
+#endif
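
The polling work above re-arms itself every THOST_REQ_POLL milliseconds until the peripheral sets HOST_REQUEST_FLAG in its OTG status byte, after which the host has to suspend the bus within 2 seconds. A standalone model of that decision; both constants below are assumed values for illustration:

/* Model of the HNP polling decision; constants are assumed placeholders. */
#include <stdio.h>

#define HOST_REQUEST_FLAG	0	/* assumed bit position */
#define THOST_REQ_POLL		1500	/* assumed poll interval, ms */

/* Returns 1 when the host should suspend the bus and hand over the role. */
static int hnp_poll_step(unsigned char otg_status)
{
	return (otg_status >> HOST_REQUEST_FLAG) & 1;
}

int main(void)
{
	unsigned char samples[] = { 0x00, 0x00, 0x01 };

	for (unsigned i = 0; i < sizeof(samples); i++)
		printf("poll %u (every %d ms): %s\n", i, THOST_REQ_POLL,
		       hnp_poll_step(samples[i]) ? "suspend bus" : "poll again");
	return 0;
}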
+
static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
{
int w;
@@ -1620,7 +1671,7 @@ EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
/* Internal routine to check whether we may autosuspend a device. */
static int autosuspend_check(struct usb_device *udev)
{
- int w, i;
+ int w, i, audio_class = 0;
struct usb_interface *intf;
/* Fail if autosuspend is disabled, or any interfaces are in use, or
@@ -1654,13 +1705,28 @@ static int autosuspend_check(struct usb_device *udev)
intf->needs_remote_wakeup)
return -EOPNOTSUPP;
}
+
+ if (intf->cur_altsetting->desc.bInterfaceClass
+ == USB_CLASS_AUDIO) {
+ dev_dbg(&udev->dev,
+ "audio interface class present\n");
+ audio_class = 1;
+ }
}
+ if (audio_class) {
+ dev_dbg(&udev->dev,
+ "disabling remote wakeup for audio class\n");
+ udev->do_remote_wakeup = 0;
+ } else {
+ if (w && !device_can_wakeup(&udev->dev)) {
+ dev_dbg(&udev->dev,
+ "remote wakeup needed for autosuspend\n");
+ return -EOPNOTSUPP;
+ }
+ udev->do_remote_wakeup = w;
+ }
+
}
- if (w && !device_can_wakeup(&udev->dev)) {
- dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n");
- return -EOPNOTSUPP;
- }
- udev->do_remote_wakeup = w;
return 0;
}
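
The rearranged check above makes remote wakeup conditional: if any interface is audio class, do_remote_wakeup is forced off and autosuspend proceeds anyway; otherwise the old rule still applies. A standalone model of the decision:

/* Model of the autosuspend wakeup decision above. */
#include <stdio.h>

#define USB_CLASS_AUDIO 1

/* Returns 0 if autosuspend may proceed, -1 if remote wakeup is needed but
 * unavailable (-EOPNOTSUPP in the driver). */
static int wakeup_check(const int *classes, int n, int need_wakeup,
			int can_wakeup, int *do_remote_wakeup)
{
	int audio = 0;

	for (int i = 0; i < n; i++)
		if (classes[i] == USB_CLASS_AUDIO)
			audio = 1;

	if (audio) {
		*do_remote_wakeup = 0;	/* audio class: never remote wakeup */
		return 0;
	}
	if (need_wakeup && !can_wakeup)
		return -1;
	*do_remote_wakeup = need_wakeup;
	return 0;
}

int main(void)
{
	int classes[] = { USB_CLASS_AUDIO };
	int drw = -1;
	int ret = wakeup_check(classes, 1, 1, 0, &drw);

	printf("ret=%d do_remote_wakeup=%d\n", ret, drw);
	return 0;
}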
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 140d3e11f21..f4f78a30b1d 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -897,6 +897,9 @@ static void usb_bus_init (struct usb_bus *bus)
bus->bandwidth_isoc_reqs = 0;
INIT_LIST_HEAD (&bus->bus_list);
+#ifdef CONFIG_USB_OTG
+ INIT_DELAYED_WORK(&bus->hnp_polling, usb_hnp_polling_work);
+#endif
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index ec6c97dadbe..f78df1d3163 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -30,6 +30,12 @@
#include "usb.h"
+#ifdef CONFIG_ARCH_U8500
+#define MAX_TOPO_LEVEL_U8500 2
+#define MAX_USB_DEVICE_U8500 8
+int usb_device_count;
+#endif
+
/* if we are in debug mode, always announce new devices */
#ifdef DEBUG
#ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
@@ -633,7 +639,7 @@ static int hub_hub_status(struct usb_hub *hub,
"%s failed (err = %d)\n", __func__, ret);
else {
*status = le16_to_cpu(hub->status->hub.wHubStatus);
- *change = le16_to_cpu(hub->status->hub.wHubChange);
+ *change = le16_to_cpu(hub->status->hub.wHubChange);
ret = 0;
}
mutex_unlock(&hub->status_mutex);
@@ -1326,11 +1332,20 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* Hubs have proper suspend/resume support. */
usb_enable_autosuspend(hdev);
+#ifdef CONFIG_ARCH_U8500
+ if (hdev->level > MAX_TOPO_LEVEL_U8500) {
+ dev_err(&intf->dev,
+ "Unsupported bus topology: > %d "
+ " hub nesting\n", MAX_TOPO_LEVEL_U8500);
+ return -E2BIG;
+ }
+#else
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
return -E2BIG;
}
+#endif
#ifdef CONFIG_USB_OTG_BLACKLIST_HUB
if (hdev->parent) {
@@ -1612,12 +1627,14 @@ static void choose_devnum(struct usb_device *udev)
* bus->devnum_next. */
devnum = find_next_zero_bit(bus->devmap.devicemap, 128,
bus->devnum_next);
- if (devnum >= 128)
+ /* Due to hardware bugs we need to reserve a device address
+ * for flushing of endpoints. */
+ if (devnum >= 127)
devnum = find_next_zero_bit(bus->devmap.devicemap,
128, 1);
- bus->devnum_next = ( devnum >= 127 ? 1 : devnum + 1);
+ bus->devnum_next = devnum >= 126 ? 1 : devnum + 1;
}
- if (devnum < 128) {
+ if (devnum < 127) {
set_bit(devnum, bus->devmap.devicemap);
udev->devnum = devnum;
}
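
After this change device address 127 is never handed out, leaving it free as a scratch address for flushing endpoints, and allocation wraps within 1..126. A standalone model of the bitmap search:

/* Model of choose_devnum() after the change: 128-bit address map, address 0
 * reserved by USB, address 127 kept free for endpoint flushing. */
#include <stdio.h>

static unsigned char devmap[16];	/* 128 bits */
static int next = 1;

static int test_bit(int n) { return (devmap[n / 8] >> (n % 8)) & 1; }
static void set_map_bit(int n) { devmap[n / 8] |= 1 << (n % 8); }

static int find_next_zero(int from)
{
	for (int i = from; i < 128; i++)
		if (!test_bit(i))
			return i;
	return 128;
}

static int choose_devnum(void)
{
	int devnum = find_next_zero(next);

	if (devnum >= 127)		/* 127 is reserved: wrap and rescan */
		devnum = find_next_zero(1);
	next = devnum >= 126 ? 1 : devnum + 1;
	if (devnum < 127) {
		set_map_bit(devnum);
		return devnum;
	}
	return -1;			/* address space exhausted */
}

int main(void)
{
	set_map_bit(0);			/* address 0 is the default address */
	for (int i = 0; i < 5; i++)
		printf("allocated devnum %d\n", choose_devnum());
	return 0;
}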
@@ -1676,6 +1693,12 @@ void usb_disconnect(struct usb_device **pdev)
dev_info(&udev->dev, "USB disconnect, device number %d\n",
udev->devnum);
+#ifdef CONFIG_USB_OTG_20
+ if (udev->bus->hnp_support && udev->portnum == udev->bus->otg_port) {
+ cancel_delayed_work_sync(&udev->bus->hnp_polling);
+ udev->bus->hnp_support = 0;
+ }
+#endif
usb_lock_device(udev);
/* Free up all the children before we remove this device */
@@ -1752,11 +1775,13 @@ static inline void announce_device(struct usb_device *udev) { }
*
* Finish enumeration for On-The-Go devices
*/
+
+#ifdef CONFIG_USB_OTG_20
+
static int usb_enumerate_device_otg(struct usb_device *udev)
{
int err = 0;
-#ifdef CONFIG_USB_OTG
/*
* OTG-aware devices on OTG-capable root hubs may be able to use SRP,
* to wake us after we've powered off VBUS; and HNP, switching roles
@@ -1780,6 +1805,84 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
(port1 == bus->otg_port)
? "" : "non-");
+ if (port1 != bus->otg_port)
+ goto out;
+ bus->hnp_support = 1;
+
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_A_HNP_SUPPORT,
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (err < 0) {
+ /* OTG MESSAGE: report errors here,
+ * customize to match your product.
+ */
+ dev_info(&udev->dev,
+ "can't set HNP mode: %d\n",
+ err);
+ bus->hnp_support = 0;
+ }
+ }
+ }
+ }
+
+out:
+ if (!is_targeted(udev)) {
+
+ /* Maybe it can talk to us, though we can't talk to it.
+ * (Includes HNP test device.)
+ */
+ if (udev->bus->hnp_support) {
+ err = usb_port_suspend(udev, PMSG_SUSPEND);
+ if (err < 0)
+ dev_dbg(&udev->dev, "HNP fail, %d\n", err);
+ }
+ } else if (udev->bus->hnp_support &&
+ udev->portnum == udev->bus->otg_port) {
+ /* HNP polling is introduced in OTG supplement Rev 2.0
+ * and older devices do not support it. The work is not
+ * re-armed if device returns STALL. B-Host also performs
+ * HNP polling.
+ */
+ schedule_delayed_work(&udev->bus->hnp_polling,
+ msecs_to_jiffies(THOST_REQ_POLL));
+ }
+fail:
+
+ return err;
+}
+
+#else
+
+static int usb_enumerate_device_otg(struct usb_device *udev)
+{
+ int err = 0;
+
+#ifdef CONFIG_USB_OTG
+ /*
+ * OTG-aware devices on OTG-capable root hubs may be able to use SRP,
+ * to wake us after we've powered off VBUS; and HNP, switching roles
+ * "host" to "peripheral". The OTG descriptor helps figure this out.
+ */
+ if (!udev->bus->is_b_host
+ && udev->config
+ && udev->parent == udev->bus->root_hub) {
+ struct usb_otg_descriptor *desc = NULL;
+ struct usb_bus *bus = udev->bus;
+
+ /* descriptor may appear anywhere in config */
+ if (__usb_get_extra_descriptor(udev->rawdescriptors[0],
+ le16_to_cpu(udev->config[0].desc.wTotalLength),
+ USB_DT_OTG, (void **) &desc) == 0) {
+ if (desc->bmAttributes & USB_OTG_HNP) {
+ unsigned port1 = udev->portnum;
+
+ dev_info(&udev->dev,
+ "Dual-Role OTG device on %sHNP port\n",
+ (port1 == bus->otg_port)
+ ? "" : "non-");
+
/* enable HNP before suspend, it's simpler */
if (port1 == bus->otg_port)
bus->b_hnp_enable = 1;
@@ -1821,6 +1924,7 @@ fail:
return err;
}
+#endif
/**
* usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
@@ -2480,6 +2584,21 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
if (udev->usb2_hw_lpm_enabled == 1)
usb_set_usb2_hardware_lpm(udev, 0);
+#ifdef CONFIG_USB_OTG_20
+ if (!udev->bus->is_b_host && udev->bus->hnp_support &&
+ udev->portnum == udev->bus->otg_port) {
+ status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_B_HNP_ENABLE,
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (status < 0)
+ dev_dbg(&udev->dev, "can't enable HNP on port %d, "
+ "status %d\n", port1, status);
+ else
+ udev->bus->b_hnp_enable = 1;
+ }
+#endif
+
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
status = set_port_feature(hub->hdev,
@@ -2634,6 +2753,12 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
int status;
u16 portchange, portstatus;
+#ifdef CONFIG_USB_OTG_20
+ if (!udev->bus->is_b_host && udev->bus->hnp_support &&
+ udev->portnum == udev->bus->otg_port)
+ udev->bus->b_hnp_enable = 0;
+#endif
+
/* Skip the initial Clear-Suspend step for a remote wakeup */
status = hub_port_status(hub, port1, &portstatus, &portchange);
if (status == 0 && !port_is_suspended(hub, portstatus))
@@ -2837,7 +2962,7 @@ EXPORT_SYMBOL_GPL(usb_root_hub_lost_power);
* Between connect detection and reset signaling there must be a delay
* of 100ms at least for debounce and power-settling. The corresponding
* timer shall restart whenever the downstream port detects a disconnect.
- *
+ *
* Apparently there are some bluetooth and irda-dongles and a number of
* low-speed devices for which this debounce period may last over a second.
* Not covered by the spec - but easy to deal with.
@@ -3035,7 +3160,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
udev->tt = &hub->tt;
udev->ttport = port1;
}
-
+
/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
* Because device hardware and firmware is sometimes buggy in
* this area, and this is how Linux has done it for ages.
@@ -3195,7 +3320,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
usb_ep0_reinit(udev);
}
-
+
retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
if (retval < (signed)sizeof(udev->descriptor)) {
dev_err(&udev->dev, "device descriptor read/all, error %d\n",
@@ -3425,6 +3550,22 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
goto loop;
}
+#ifdef CONFIG_ARCH_U8500
+ if (hdev->parent == NULL)
+ usb_device_count = 1;
+
+ if (usb_device_count > MAX_USB_DEVICE_U8500) {
+
+ dev_err(&udev->dev,
+ "device connected is more than %d\n",
+ MAX_USB_DEVICE_U8500);
+
+ status = -ENOTCONN; /* Don't retry */
+ goto loop;
+ }
+#endif
+
+
/* reset (non-USB 3.0 devices) and get descriptor */
status = hub_port_init(hub, udev, port1, i);
if (status < 0)
@@ -3464,7 +3605,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
goto loop_disable;
}
}
-
+
/* check for devices running slower than they could */
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
&& udev->speed == USB_SPEED_FULL
@@ -3505,6 +3646,115 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if (status)
dev_dbg(hub_dev, "%dmA power budget left\n", status);
+#ifdef CONFIG_USB_OTG_20
+ struct usb_otg_descriptor *desc = NULL;
+ int ret;
+ /* descriptor may appear anywhere in config */
+ __usb_get_extra_descriptor(udev->rawdescriptors[0],
+ le16_to_cpu(udev->config[0].desc.wTotalLength),
+ USB_DT_OTG, (void **) &desc);
+
+ ret = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_B_HNP_ENABLE,
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (ret < 0)
+ dev_dbg(hub_dev, "set feature error\n");
+
+ u16 idVendor = le16_to_cpu(udev->descriptor.idVendor);
+ if (idVendor == USB_OTG_TEST_MODE_VID) {
+ u16 wValue, typeReq, wIndex;
+ u32 set_feature = 0;
+ int err = 0;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ u16 idProduct = le16_to_cpu(
+ udev->descriptor.idProduct);
+ /* Convert the Test Mode Request
+ * to control request
+ */
+ wValue = USB_PORT_FEAT_TEST;
+ typeReq = SetPortFeature;
+ wIndex = 1;
+
+ switch (idProduct) {
+ case USB_OTG_TEST_SE0_NAK_PID:
+ wIndex |= USB_OTG_TEST_SE0_NAK << 8;
+ set_feature = 1;
+ break;
+ case USB_OTG_TEST_J_PID:
+ wIndex |= USB_OTG_TEST_J << 8;
+ set_feature = 1;
+ break;
+ case USB_OTG_TEST_K_PID:
+ wIndex |= USB_OTG_TEST_K << 8;
+ set_feature = 1;
+ break;
+ case USB_OTG_TEST_PACKET_PID:
+ wIndex |= USB_OTG_TEST_PACKET << 8;
+ set_feature = 1;
+ break;
+ case USB_OTG_TEST_HS_HOST_PORT_SUSPEND_RESUME_PID:
+ /* Sleep for 15 sec, suspend, sleep
+ * another 15 sec, then resume
+ */
+ ssleep(15);
+
+ err = usb_port_suspend(udev,
+ PMSG_SUSPEND);
+ if (err < 0) {
+ dev_err(&udev->dev, "OTG TEST_MODE:"
+ "Suspend Fail, %d\n", err);
+ goto loop_disable;
+ }
+ ssleep(15);
+ err = usb_port_resume(udev, PMSG_RESUME);
+ if (err < 0) {
+ dev_err(&udev->dev,
+ "can't resume for"
+ "OTG TEST_MODE: %d\n", err);
+ goto loop_disable;
+ }
+ break;
+ case USB_OTG_TEST_SINGLE_STEP_GET_DEV_DESC_PID:
+ /* Sleep for 15 Sec. Issue the GetDeviceDescriptor */
+ ssleep(15);
+ err = usb_get_device_descriptor(udev,
+ sizeof(udev->descriptor));
+ if (err < 0) {
+ dev_err(&udev->dev, "can't re-read"
+ "device descriptor for "
+ "OTG TEST MODE: %d\n", err);
+ goto loop_disable;
+ }
+ break;
+ case USB_OTG_TEST_SINGLE_STEP_GET_DEV_DESC_DATA_PID:
+ /* Issue GetDeviceDescriptor, Sleep for 15 Sec. */
+ err = usb_get_device_descriptor(udev,
+ sizeof(udev->descriptor));
+ if (err < 0) {
+ dev_err(&udev->dev, "can't re-read"
+ "device descriptor for "
+ "OTG TEST MODE: %d\n", err);
+ goto loop_disable;
+ }
+ ssleep(15);
+ break;
+ default:
+ /* is_targeted() will take care for wrong PID */
+ dev_dbg(&udev->dev, "OTG TEST_MODE:Wrong"
+ "PID %d\n", idProduct);
+ break;
+ }
+
+ if (set_feature) {
+ err = hcd->driver->hub_control(hcd,
+ typeReq, wValue, wIndex,
+ NULL, 0);
+ }
+ }
+
+#endif
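
Each test-device PID selects a PORT_TEST mode that ends up in the high byte of wIndex for the SetPortFeature request, as the switch above shows. A standalone sketch of the translation, reusing the PID and selector values defined in otg_whitelist.h below:

/* Translate an OTG test-device PID into the SetPortFeature(PORT_TEST)
 * wIndex for port 1 (values from otg_whitelist.h). */
#include <stdio.h>

#define USB_OTG_TEST_SE0_NAK_PID	0x0101
#define USB_OTG_TEST_J_PID		0x0102
#define USB_OTG_TEST_K_PID		0x0103
#define USB_OTG_TEST_PACKET_PID		0x0104

#define USB_OTG_TEST_SE0_NAK	0x01
#define USB_OTG_TEST_J		0x02
#define USB_OTG_TEST_K		0x03
#define USB_OTG_TEST_PACKET	0x04

/* Returns the wIndex for port 1, or 0 when the PID does not request an
 * electrical test mode. */
static unsigned short test_mode_windex(unsigned short pid)
{
	unsigned short port = 1;

	switch (pid) {
	case USB_OTG_TEST_SE0_NAK_PID:	return port | USB_OTG_TEST_SE0_NAK << 8;
	case USB_OTG_TEST_J_PID:	return port | USB_OTG_TEST_J << 8;
	case USB_OTG_TEST_K_PID:	return port | USB_OTG_TEST_K << 8;
	case USB_OTG_TEST_PACKET_PID:	return port | USB_OTG_TEST_PACKET << 8;
	default:			return 0;
	}
}

int main(void)
{
	printf("Test_J wIndex = 0x%04x\n",
	       test_mode_windex(USB_OTG_TEST_J_PID));
	return 0;
}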
return;
loop_disable:
@@ -3522,7 +3772,7 @@ loop:
!(hcd->driver->port_handed_over)(hcd, port1))
dev_err(hub_dev, "unable to enumerate USB device on port %d\n",
port1);
-
+
done:
hub_port_disable(hub, port1, 1);
if (hcd->driver->relinquish_port && !hub->hdev->parent)
@@ -3689,7 +3939,7 @@ static void hub_events(void)
* EM interference sometimes causes badly
* shielded USB devices to be shutdown by
* the hub, this hack enables them again.
- * Works at least with mouse driver.
+ * Works at least with mouse driver.
*/
if (!(portstatus & USB_PORT_STAT_ENABLE)
&& !connect_change
@@ -4029,7 +4279,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
if (ret < 0)
goto re_enumerate;
-
+
/* Device might have changed firmware (DFU or similar) */
if (descriptors_changed(udev, &descriptor)) {
dev_info(&udev->dev, "device firmware changed\n");
@@ -4062,6 +4312,16 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
goto re_enumerate;
}
mutex_unlock(hcd->bandwidth_mutex);
+
+#ifdef CONFIG_USB_OTG_20
+ ret = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_A_HNP_SUPPORT,
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (ret < 0)
+ dev_err(&udev->dev, "set feature error\n");
+#endif
usb_set_device_state(udev, USB_STATE_CONFIGURED);
/* Put interfaces back into the same altsettings as before.
@@ -4102,7 +4362,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
done:
return 0;
-
+
re_enumerate:
hub_port_logical_disconnect(parent_hub, port1);
return -ENODEV;
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7728c91dfa2..a5fdc3ac0d7 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -46,11 +46,18 @@ EXPORT_SYMBOL_GPL(usb_unregister_notify);
void usb_notify_add_device(struct usb_device *udev)
{
+#ifdef CONFIG_ARCH_U8500
+ usb_device_count++;
+#endif
+
blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev);
}
void usb_notify_remove_device(struct usb_device *udev)
{
+#ifdef CONFIG_ARCH_U8500
+ usb_device_count--;
+#endif
/* Protect against simultaneous usbfs open */
mutex_lock(&usbfs_mutex);
blocking_notifier_call_chain(&usb_notifier_list,
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index e8cdce571bb..86a09972836 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -43,12 +43,50 @@ static struct usb_device_id whitelist_table [] = {
{ USB_DEVICE(0x0525, 0xa4a0), },
#endif
+#ifdef CONFIG_USB_OTG_20
+{ USB_DEVICE_INFO(8, 6, 80) },/* Mass Storage Devices */
+{ USB_DEVICE_INFO(1, 1, 0) },/* Audio Devices */
+{ USB_DEVICE_INFO(3, 0, 0) },/* Keyboard Devices */
+{ USB_DEVICE_INFO(3, 1, 2) },/* Mouse Devices */
+
+/* Test Devices */
+{ USB_DEVICE(0x1A0A, 0x0101), },/* Test_SE0_NAK */
+{ USB_DEVICE(0x1A0A, 0x0102), },/* Test_J */
+{ USB_DEVICE(0x1A0A, 0x0103), },/* Test_K */
+{ USB_DEVICE(0x1A0A, 0x0104), },/* Test_Packet */
+{ USB_DEVICE(0x1A0A, 0x0106), },/* HS_HOST_PORT_SUSPEND_RESUME */
+{ USB_DEVICE(0x1A0A, 0x0107), },/* SINGLE_STEP_GET_DEV_DESC */
+{ USB_DEVICE(0x1A0A, 0x0108), },/* SINGLE_STEP_GET_DEV_DESC_DATA */
+{ USB_DEVICE(0x1A0A, 0x0201), },/* OTG 2 TEST DEVICE*/
+#endif
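+
+/*
+ * Editor's note: USB_DEVICE_INFO(class, subclass, protocol) matches on
+ * the class/subclass/protocol triple; e.g. (8, 6, 80) is mass storage
+ * with the SCSI transparent command set over bulk-only transport
+ * (0x50). The OTG 2.0 code in is_targeted() below reuses these entries
+ * for interface-level matching.
+ */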
{ } /* Terminating entry */
};
+/* The TEST_MODE Definition for OTG as per 6.4 of OTG Rev 2.0 */
+
+#ifdef CONFIG_USB_OTG_20
+#define USB_OTG_TEST_MODE_VID 0x1A0A
+#define USB_OTG_TEST_SE0_NAK_PID 0x0101
+#define USB_OTG_TEST_J_PID 0x0102
+#define USB_OTG_TEST_K_PID 0x0103
+#define USB_OTG_TEST_PACKET_PID 0x0104
+#define USB_OTG_TEST_HS_HOST_PORT_SUSPEND_RESUME_PID 0x0106
+#define USB_OTG_TEST_SINGLE_STEP_GET_DEV_DESC_PID 0x0107
+#define USB_OTG_TEST_SINGLE_STEP_GET_DEV_DESC_DATA_PID 0x0108
+
+#define USB_OTG_TEST_SE0_NAK 0x01
+#define USB_OTG_TEST_J 0x02
+#define USB_OTG_TEST_K 0x03
+#define USB_OTG_TEST_PACKET 0x04
+#endif
+
static int is_targeted(struct usb_device *dev)
{
struct usb_device_id *id = whitelist_table;
+#ifdef CONFIG_USB_OTG_20
+ u8 number_configs = 0;
+ u8 number_interface = 0;
+#endif
/* possible in developer configs only! */
if (!dev->bus->otg_port)
@@ -98,6 +136,36 @@ static int is_targeted(struct usb_device *dev)
/* add other match criteria here ... */
+#ifdef CONFIG_USB_OTG_20
+
+ /* Check class, subclass and protocol at the interface level */
+ for (number_configs = dev->descriptor.bNumConfigurations;
+ number_configs > 0; number_configs--)
+ for (number_interface = dev->config->desc.bNumInterfaces;
+ number_interface > 0;
+ number_interface--)
+ for (id = whitelist_table; id->match_flags; id++) {
+ if ((id->match_flags &
+ USB_DEVICE_ID_MATCH_DEV_CLASS) &&
+ (id->bDeviceClass !=
+ dev->config->intf_cache[number_interface-1]
+ ->altsetting[0].desc.bInterfaceClass))
+ continue;
+ if ((id->match_flags &
+ USB_DEVICE_ID_MATCH_DEV_SUBCLASS)
+ && (id->bDeviceSubClass !=
+ dev->config->intf_cache[number_interface-1]
+ ->altsetting[0].desc.bInterfaceSubClass))
+ continue;
+ if ((id->match_flags &
+ USB_DEVICE_ID_MATCH_DEV_PROTOCOL)
+ && (id->bDeviceProtocol !=
+ dev->config->intf_cache[number_interface-1]
+ ->altsetting[0].desc.bInterfaceProtocol))
+ continue;
+ return 1;
+ }
+#endif
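+
+/*
+ * Editor's sketch (assumption, not in the patch): the triple loop above
+ * reduces to a per-interface predicate applied to altsetting 0 of each
+ * cached interface, e.g.:
+ *
+ *	static bool tpl_match(const struct usb_device_id *id,
+ *			const struct usb_interface_descriptor *d)
+ *	{
+ *		if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_CLASS) &&
+ *				id->bDeviceClass != d->bInterfaceClass)
+ *			return false;
+ *		if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_SUBCLASS) &&
+ *				id->bDeviceSubClass != d->bInterfaceSubClass)
+ *			return false;
+ *		if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_PROTOCOL) &&
+ *				id->bDeviceProtocol != d->bInterfaceProtocol)
+ *			return false;
+ *		return true;
+ *	}
+ */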
/* OTG MESSAGE: report errors here, customize to match your product */
dev_err(&dev->dev, "device v%04x p%04x is not supported\n",
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 71648dcbe43..f7962567c1b 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,5 +1,9 @@
#include <linux/pm.h>
+#ifdef CONFIG_ARCH_U8500
+extern int usb_device_count;
+#endif
+
/* Functions local to drivers/usb/core/ */
extern int usb_create_sysfs_dev_files(struct usb_device *dev);
@@ -74,7 +78,9 @@ static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg)
}
#endif
-
+#ifdef CONFIG_USB_OTG
+extern void usb_hnp_polling_work(struct work_struct *work);
+#endif
#ifdef CONFIG_USB_SUSPEND
extern void usb_autosuspend_device(struct usb_device *udev);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 2633f759511..7e99677b81b 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -977,4 +977,15 @@ config USB_G_WEBCAM
endchoice
+config USB_OTG_20
+ bool "OTG 2.0 USB SUPPORT"
+ select USB_OTG
+ select PM_RUNTIME
+ select USB_OTG_WHITELIST
+ select USB_SUSPEND
+ default n
+ help
+ Enable the whitelist (Targeted Peripheral List, TPL) and runtime
+ power management at the system and USB levels for OTG 2.0.
+
endif # USB_GADGET
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 51f3d42f5a6..14cbfa80afb 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -315,6 +315,12 @@ struct usb_ep *usb_ep_autoconfig_ss(
#endif
}
+ if (gadget->ops->configure_ep) {
+ ep = gadget->ops->configure_ep(gadget, type, desc);
+ if (ep && ep_matches(gadget, ep, desc, ep_comp))
+ return ep;
+ }
+
/* Second, look at endpoints until an unclaimed one looks usable */
list_for_each_entry (ep, &gadget->ep_list, ep_list) {
if (ep_matches(gadget, ep, desc, ep_comp))
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index cb8c162cae5..8473424e31f 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2786,6 +2786,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
curlun->cdrom = !!lcfg->cdrom;
curlun->ro = lcfg->cdrom || lcfg->ro;
+ curlun->nofua = 1;
curlun->initially_ro = curlun->ro;
curlun->removable = lcfg->removable;
curlun->dev.release = fsg_lun_release;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 52343654f5d..b8bfda4fc58 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -174,8 +174,8 @@ rndis_iad_descriptor = {
.bFirstInterface = 0, /* XXX, hardcoded */
.bInterfaceCount = 2, // control + data
.bFunctionClass = USB_CLASS_COMM,
- .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ .bFunctionSubClass = USB_CDC_SUBCLASS_ACM,
+ .bFunctionProtocol = USB_CDC_ACM_PROTO_VENDOR,
/* .iFunction = DYNAMIC */
};
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index f70cab3beee..f9e42041b5f 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -34,6 +34,8 @@ if USB_MUSB_HDRC
choice
prompt "Platform Glue Layer"
+ bool
+ default USB_MUSB_UX500 if ARCH_U8500 || ARCH_U5500
config USB_MUSB_DAVINCI
tristate "DaVinci"
@@ -60,7 +62,7 @@ config USB_MUSB_BLACKFIN
config USB_MUSB_UX500
tristate "U8500 and U5500"
- depends on (ARCH_U8500 && AB8500_USB)
+ depends on (ARCH_U8500) || (ARCH_U5500)
endchoice
@@ -114,4 +116,13 @@ config MUSB_PIO_ONLY
endchoice
+config USB_MUSB_DEBUG
+ depends on USB_MUSB_HDRC
+ bool "Enable debugging messages"
+ default n
+ help
+ This enables musb debugging. To set the logging level use the debug
+ module parameter. Starting at level 3, per-transfer (urb, usb_request,
+ packet, or dma transfer) tracing may kick in.
+
endif # USB_MUSB_HDRC
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 66aaccf0449..2923752b858 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -520,9 +520,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
/* see manual for the order of the tests */
if (int_usb & MUSB_INTR_SESSREQ) {
void __iomem *mbase = musb->mregs;
-
if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
- && (devctl & MUSB_DEVCTL_BDEVICE)) {
+ || (devctl & MUSB_DEVCTL_BDEVICE)) {
dev_dbg(musb->controller, "SessReq while on B state\n");
return IRQ_HANDLED;
}
@@ -715,6 +714,9 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
b_host:
musb->xceiv->state = OTG_STATE_B_HOST;
hcd->self.is_b_host = 1;
+#ifdef CONFIG_USB_OTG_20
+ musb->g.otg_hnp_reqd = 0;
+#endif
musb->ignore_disconnect = 0;
del_timer(&musb->otg_timer);
break;
@@ -1036,9 +1038,6 @@ static void musb_shutdown(struct platform_device *pdev)
|| defined(CONFIG_USB_MUSB_AM35X) \
|| defined(CONFIG_USB_MUSB_AM35X_MODULE)
static ushort __devinitdata fifo_mode = 4;
-#elif defined(CONFIG_USB_MUSB_UX500) \
- || defined(CONFIG_USB_MUSB_UX500_MODULE)
-static ushort __devinitdata fifo_mode = 5;
#else
static ushort __devinitdata fifo_mode = 2;
#endif
@@ -1123,8 +1122,8 @@ static struct musb_fifo_cfg __devinitdata mode_4_cfg[] = {
/* mode 5 - fits in 8KB */
static struct musb_fifo_cfg __devinitdata mode_5_cfg[] = {
-{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
-{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
@@ -1752,7 +1751,9 @@ musb_srp_store(struct device *dev, struct device_attribute *attr,
{
struct musb *musb = dev_to_musb(dev);
unsigned short srp;
-
+#ifdef CONFIG_USB_OTG_20
+ musb->xceiv->start_srp(musb->xceiv);
+#endif
if (sscanf(buf, "%hu", &srp) != 1
|| (srp != 1)) {
dev_err(dev, "SRP: Value must be 1\n");
@@ -1766,10 +1767,45 @@ musb_srp_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
+static ssize_t
+ux500_set_extvbus(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb_hdrc_platform_data *plat = dev->platform_data;
+ unsigned short extvbus;
+
+ if (sscanf(buf, "%hu", &extvbus) != 1
+ || ((extvbus != 1) && (extvbus != 0))) {
+ dev_err(dev, "Invalid value EXTVBUS must be 1 or 0\n");
+ return -EINVAL;
+ }
+
+ plat->extvbus = extvbus;
+
+ return n;
+}
+
+static ssize_t
+ux500_get_extvbus(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct musb_hdrc_platform_data *plat = dev->platform_data;
+ int extvbus;
+
+ /* FIXME get_vbus_status() is normally #defined as false...
+ * and is effectively TUSB-specific.
+ */
+ extvbus = plat->extvbus;
+
+ return sprintf(buf, "EXTVBUS is %s\n",
+ extvbus ? "on" : "off");
+}
+static DEVICE_ATTR(extvbus, 0644, ux500_get_extvbus, ux500_set_extvbus);
+
static struct attribute *musb_attributes[] = {
&dev_attr_mode.attr,
&dev_attr_vbus.attr,
&dev_attr_srp.attr,
+ &dev_attr_extvbus.attr,
NULL
};
@@ -1886,7 +1922,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
status = -ENODEV;
goto fail0;
}
-
/* allocate */
musb = allocate_instance(dev, plat->config, ctrl);
if (!musb) {
@@ -2330,7 +2365,7 @@ static int musb_suspend(struct device *dev)
return 0;
}
-static int musb_resume_noirq(struct device *dev)
+static int musb_resume(struct device *dev)
{
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
@@ -2371,13 +2406,17 @@ static int musb_runtime_resume(struct device *dev)
static const struct dev_pm_ops musb_dev_pm_ops = {
.suspend = musb_suspend,
- .resume_noirq = musb_resume_noirq,
+ .resume = musb_resume,
.runtime_suspend = musb_runtime_suspend,
.runtime_resume = musb_runtime_resume,
};
-
+#ifdef CONFIG_UX500_SOC_DB8500
#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
#else
+#define MUSB_DEV_PM_OPS NULL
+#endif
+
+#else
#define MUSB_DEV_PM_OPS NULL
#endif
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index f4a40f001c8..2d52530d3e4 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -153,7 +153,10 @@ enum musb_g_ep0_state {
#define OTG_TIME_A_WAIT_BCON 1100 /* min 1 second */
#define OTG_TIME_A_AIDL_BDIS 200 /* min 200 msec */
#define OTG_TIME_B_ASE0_BRST 100 /* min 3.125 ms */
-
+#ifdef CONFIG_USB_OTG_20
+#define USB_SUSP_DET_DURATION 5 /* suspend time 5ms */
+#define TTST_SRP 3000 /* 3 s; spec allows max 5 s */
+#endif
/*************************** REGISTER ACCESS ********************************/
@@ -229,6 +232,8 @@ struct musb_platform_ops {
int (*adjust_channel_params)(struct dma_channel *channel,
u16 packet_sz, u8 *mode,
dma_addr_t *dma_addr, u32 *len);
+ struct usb_ep* (*configure_endpoints)(struct musb *musb, u8 type,
+ struct usb_endpoint_descriptor *desc);
};
/*
@@ -430,7 +435,6 @@ struct musb {
unsigned set_address:1;
unsigned test_mode:1;
unsigned softconnect:1;
-
u8 address;
u8 test_mode_nr;
u16 ackpend; /* ep0 */
@@ -603,4 +607,13 @@ static inline int musb_platform_exit(struct musb *musb)
return musb->ops->exit(musb);
}
+static inline struct usb_ep *musb_platform_configure_ep(struct musb *musb,
+ u8 type, struct usb_endpoint_descriptor *desc)
+{
+ struct usb_ep *ep = NULL;
+
+ if (musb->ops->configure_endpoints)
+ ep = musb->ops->configure_endpoints(musb, type, desc);
+ return ep;
+}
#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 40a37c91cc1..19e3c91c22e 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -68,6 +68,7 @@ static const struct musb_register_map musb_regmap[] = {
{ "RxFIFOadd", 0x66, 16 },
{ "VControl", 0x68, 32 },
{ "HWVers", 0x6C, 16 },
+ { "EXTVBUS", 0x70, 8 },
{ "EPInfo", 0x78, 8 },
{ "RAMInfo", 0x79, 8 },
{ "LinkInfo", 0x7A, 8 },
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f42c29b11f7..b2abef69dc3 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -401,7 +401,21 @@ static void txstate(struct musb *musb, struct musb_request *req)
csr |= (MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_DMAMODE
| MUSB_TXCSR_MODE);
- if (!musb_ep->hb_mult)
+ /*
+ * Enable Autoset according to table
+ * below
+ * ************************************
+ * bulk_split hb_mult Autoset_Enable
+ * ************************************
+ * 0 0 Yes(Normal)
+ * 0 >0 No(High BW ISO)
+ * 1 0 Yes(HS bulk)
+ * 1 >0 Yes(FS bulk)
+ */
+ if (!musb_ep->hb_mult ||
+ (musb_ep->hb_mult &&
+ can_bulk_split(musb,
+ musb_ep->type)))
csr |= MUSB_TXCSR_AUTOSET;
}
csr &= ~MUSB_TXCSR_P_UNDERRUN;
@@ -1097,6 +1111,12 @@ static int musb_gadget_enable(struct usb_ep *ep,
/* REVISIT if can_bulk_split(), use by updating "tmp";
* likewise high bandwidth periodic tx
*/
+ /* Set the TXMAXP register correctly for Bulk IN
+ * endpoints in device mode
+ */
+ if (can_bulk_split(musb, musb_ep->type))
+ musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
+ musb_ep->packet_sz) - 1;
/* Set TXMAXP with the FIFO size of the endpoint
* to disable double buffering mode.
*/
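
/*
 * Editor's sketch (not part of the patch): when can_bulk_split() holds,
 * the hb_mult computed above is folded into TXMAXP's multiplier field
 * together with the packet size, i.e. the subsequent write amounts to:
 *
 *	musb_writew(regs, MUSB_TXMAXP,
 *		musb_ep->packet_sz | (musb_ep->hb_mult << 11));
 */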
@@ -1642,7 +1662,9 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
}
spin_unlock_irqrestore(&musb->lock, flags);
+#ifndef CONFIG_USB_OTG_20
otg_start_srp(musb->xceiv->otg);
+#endif
spin_lock_irqsave(&musb->lock, flags);
/* Block idling for at least 1s */
@@ -1753,6 +1775,14 @@ static int musb_gadget_start(struct usb_gadget *g,
static int musb_gadget_stop(struct usb_gadget *g,
struct usb_gadget_driver *driver);
+static struct usb_ep *musb_gadget_configure_ep(struct usb_gadget *gadget,
+ u8 type, struct usb_endpoint_descriptor *desc)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ return musb_platform_configure_ep(musb, type, desc);
+}
+
static const struct usb_gadget_ops musb_gadget_operations = {
.get_frame = musb_gadget_get_frame,
.wakeup = musb_gadget_wakeup,
@@ -1762,6 +1792,7 @@ static const struct usb_gadget_ops musb_gadget_operations = {
.pullup = musb_gadget_pullup,
.udc_start = musb_gadget_start,
.udc_stop = musb_gadget_stop,
+ .configure_ep = musb_gadget_configure_ep,
};
/* ----------------------------------------------------------------------- */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index e40d7647caf..631aab86240 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -45,6 +45,11 @@
/* ep0 is always musb->endpoints[0].ep_in */
#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0])
+/* OTG 2.0 Specification 6.2.3 GetStatus commands */
+#ifdef CONFIG_USB_OTG_20
+#define OTG_STATUS_SELECT 0xF
+#endif
+
/*
* locking note: we use only the controller lock, for simpler correctness.
* It's always held with IRQs blocked.
@@ -80,21 +85,33 @@ static int service_tx_status_request(
int handled = 1;
u8 result[2], epnum = 0;
const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
-
+#ifdef CONFIG_USB_OTG_20
+ unsigned int otg_recip = le16_to_cpu(ctrlrequest->wIndex) >> 12;
+#endif
result[1] = 0;
switch (recip) {
case USB_RECIP_DEVICE:
- result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
- result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
- if (musb->g.is_otg) {
- result[0] |= musb->g.b_hnp_enable
- << USB_DEVICE_B_HNP_ENABLE;
- result[0] |= musb->g.a_alt_hnp_support
- << USB_DEVICE_A_ALT_HNP_SUPPORT;
- result[0] |= musb->g.a_hnp_support
- << USB_DEVICE_A_HNP_SUPPORT;
+#ifdef CONFIG_USB_OTG_20
+ if (otg_recip != OTG_STATUS_SELECT) {
+#endif
+ result[0] = musb->is_self_powered <<
+ USB_DEVICE_SELF_POWERED;
+ result[0] |= musb->may_wakeup <<
+ USB_DEVICE_REMOTE_WAKEUP;
+ if (musb->g.is_otg) {
+ result[0] |= musb->g.b_hnp_enable
+ << USB_DEVICE_B_HNP_ENABLE;
+ result[0] |= musb->g.a_alt_hnp_support
+ << USB_DEVICE_A_ALT_HNP_SUPPORT;
+ result[0] |= musb->g.a_hnp_support
+ << USB_DEVICE_A_HNP_SUPPORT;
+ }
+#ifdef CONFIG_USB_OTG_20
+ } else {
+ result[0] = 1 & musb->g.otg_hnp_reqd;
}
+#endif
break;
case USB_RECIP_INTERFACE:
@@ -356,7 +373,22 @@ __acquires(musb->lock)
musb->test_mode_nr =
MUSB_TEST_PACKET;
break;
-
+#ifdef CONFIG_USB_OTG_20
+ case 6: /* OTG_SRP_REQD */
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.otg_srp_reqd = 1;
+
+ mod_timer(&musb->otg_timer,
+ jiffies
+ + msecs_to_jiffies(TTST_SRP));
+ break;
+ case 7: /* OTG_HNP_REQD */
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.otg_hnp_reqd = 1;
+ break;
+#endif
case 0xc0:
/* TEST_FORCE_HS */
pr_debug("TEST_FORCE_HS\n");
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ef8d744800a..0a108587e2e 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -46,7 +46,6 @@
#include "musb_core.h"
#include "musb_host.h"
-
/* MUSB HOST status 22-mar-2006
*
* - There's still lots of partial code duplication for fault paths, so
@@ -108,24 +107,41 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
+ void __iomem *regs = ep->musb->mregs;
u16 csr;
- u16 lastcsr = 0;
- int retries = 1000;
+ u8 addr;
+ int retries = 3000; /* 3ms */
+ /*
+ * NOTE: We are using a hack here because the FIFO-FLUSH
+ * bit is broken in hardware! The hack consists of changing
+ * the TXFUNCADDR to an unused device address and waiting
+ * for any pending USB packets to hit the 3-strikes and your
+ * gone rule.
+ */
+ addr = musb_readb(regs, MUSB_BUSCTL_OFFSET(ep->epnum, MUSB_TXFUNCADDR));
csr = musb_readw(epio, MUSB_TXCSR);
while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
- if (csr != lastcsr)
- dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
- lastcsr = csr;
- csr |= MUSB_TXCSR_FLUSHFIFO;
- musb_writew(epio, MUSB_TXCSR, csr);
+ musb_writeb(regs, MUSB_BUSCTL_OFFSET(ep->epnum,
+ MUSB_TXFUNCADDR), 127);
csr = musb_readw(epio, MUSB_TXCSR);
- if (WARN(retries-- < 1,
- "Could not flush host TX%d fifo: csr: %04x\n",
- ep->epnum, csr))
- return;
- mdelay(1);
+ retries--;
+ if (retries == 0) {
+ /* can happen if the USB clocks are OFF */
+ dev_dbg(musb->controller, "Could not flush host TX%d "
+ "fifo: csr=0x%04x\n", ep->epnum, csr);
+ break;
+ }
+ udelay(1);
}
+ /* clear any errors */
+ csr &= ~(MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_NAKTIMEOUT);
+ musb_writew(epio, MUSB_TXCSR, csr);
+
+ /* restore endpoint address */
+ musb_writeb(regs, MUSB_BUSCTL_OFFSET(ep->epnum, MUSB_TXFUNCADDR), addr);
}
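
/*
 * Editor's note: address 127 used above is the highest assignable USB
 * device address and is presumed unclaimed here, so retargeting
 * TXFUNCADDR at it lets the stuck packet fail under the 3-strikes rule
 * instead of relying on the broken FLUSHFIFO bit; the real address is
 * restored before returning.
 */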
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
@@ -615,16 +631,26 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
u16 csr;
u8 mode;
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
if (length > channel->max_len)
length = channel->max_len;
csr = musb_readw(epio, MUSB_TXCSR);
- if (length > pkt_size) {
+ if (length >= pkt_size) {
mode = 1;
csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
/* autoset shouldn't be set in high bandwidth */
- if (qh->hb_mult == 1)
+ /*
+ * Enable Autoset according to table
+ * below
+ * bulk_split hb_mult Autoset_Enable
+ * 0 1 Yes(Normal)
+ * 0 >1 No(High BW ISO)
+ * 1 1 Yes(HS bulk)
+ * 1 >1 Yes(FS bulk)
+ */
+ if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
+ can_bulk_split(hw_ep->musb, qh->type)))
csr |= MUSB_TXCSR_AUTOSET;
} else {
mode = 0;
@@ -771,6 +797,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
/* protocol/endpoint/interval/NAKlimit */
if (epnum) {
musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
+ /*
+ * Set the TXMAXP register correctly for Bulk OUT
+ * endpoints in host mode
+ */
+ if (can_bulk_split(musb, qh->type))
+ qh->hb_mult = hw_ep->max_packet_sz_tx
+ / packet_sz;
if (musb->double_buffer_not_ok)
musb_writew(epio, MUSB_TXMAXP,
hw_ep->max_packet_sz_tx);
@@ -802,6 +835,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
if (load_count) {
/* PIO to load FIFO */
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
qh->segsize = load_count;
musb_write_fifo(hw_ep, load_count, buf);
}
@@ -894,6 +929,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
if (fifo_count < len)
urb->status = -EOVERFLOW;
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
musb_read_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
@@ -933,6 +970,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
fifo_count,
(fifo_count == 1) ? "" : "s",
fifo_dest);
+ /* Unmap the buffer so that CPU can use it */
+ usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
musb_write_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
@@ -1134,6 +1173,22 @@ void musb_host_tx(struct musb *musb, u8 epnum)
dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
status = -ETIMEDOUT;
+ } else if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
+ /* BUSY - can happen during USB transfer cancel */
+
+ /* MUSB_TXCSR_TXPKTRDY indicates that data written to the
+ * FIFO by DMA has not yet gone out on the USB bus; the DMA
+ * completion callback does not guarantee that it has. So,
+ * if we get here, wait for MUSB_TXCSR_TXPKTRDY to clear
+ * before proceeding.
+ */
+ dev_dbg(musb->controller, "TXPKTRDY set. Data transfer ongoing. Wait...\n");
+
+ do {
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+ } while ((tx_csr & MUSB_TXCSR_TXPKTRDY) != 0);
+ dev_dbg(musb->controller, "TXPKTRDY Cleared. Continue...\n");
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
@@ -1427,7 +1482,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
size_t xfer_len;
void __iomem *mbase = musb->mregs;
int pipe;
- u16 rx_csr, val;
+ u16 rx_csr, val, restore_csr;
bool iso_err = false;
bool done = false;
u32 status;
@@ -1537,7 +1592,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* FIXME this is _way_ too much in-line logic for Mentor DMA */
-#ifndef CONFIG_USB_INVENTRA_DMA
+#if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA)
if (rx_csr & MUSB_RXCSR_H_REQPKT) {
/* REVISIT this happened for a while on some short reads...
* the cleanup still needs investigation... looks bad...
@@ -1569,7 +1624,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
| MUSB_RXCSR_RXPKTRDY);
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
@@ -1625,7 +1680,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
}
/* we are expecting IN packets */
-#ifdef CONFIG_USB_INVENTRA_DMA
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
if (dma) {
struct dma_controller *c;
u16 rx_count;
@@ -1709,6 +1764,11 @@ void musb_host_rx(struct musb *musb, u8 epnum)
*/
val = musb_readw(epio, MUSB_RXCSR);
+
+ /* retain the original value,
+ * which will be used to reset CSR
+ */
+ restore_csr = val;
val &= ~MUSB_RXCSR_H_REQPKT;
if (dma->desired_mode == 0)
@@ -1736,7 +1796,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
c->channel_release(dma);
hw_ep->rx_channel = NULL;
dma = NULL;
- /* REVISIT reset CSR */
+ musb_writew(epio, MUSB_RXCSR, restore_csr);
}
}
#endif /* Mentor DMA */
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 03f2655af29..1c96cf60907 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -246,7 +246,9 @@
*/
#define MUSB_DEVCTL 0x60 /* 8 bit */
-
+#ifdef CONFIG_USB_OTG_20
+#define MUSB_MISC 0x61 /* 8 bit */
+#endif
/* These are always controlled through the INDEX register */
#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */
#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 22ec3e37998..702d5efe9ef 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -379,7 +379,7 @@ int musb_hub_control(
musb_port_suspend(musb, true);
break;
case USB_PORT_FEAT_TEST:
- if (unlikely(is_host_active(musb)))
+ if (unlikely(!is_host_active(musb)))
goto error;
wIndex >>= 8;
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index aa09dd417b9..c1a205cd5e3 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -25,9 +25,15 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <mach/id.h>
+#include <mach/usb.h>
#include "musb_core.h"
+#define DEFAULT_DEVCTL 0x81
+static void ux500_musb_set_vbus(struct musb *musb, int is_on);
+
struct ux500_glue {
struct device *dev;
struct platform_device *musb;
@@ -35,17 +41,439 @@ struct ux500_glue {
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
+static struct timer_list notify_timer;
+static struct musb_context_registers context;
+static bool context_stored;
+static struct musb *_musb;
+
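+/*
+ * Editor's note: ux500_store_context()/ux500_restore_context() save and
+ * replay the MUSB register file (common, per-endpoint and, in host
+ * mode, hub/function address registers) around runtime-PM power cuts;
+ * on u5500 the musb pointer is cached in _musb so a restore can be
+ * triggered from the transceiver notifier with a NULL argument.
+ */
+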
+static void ux500_store_context(struct musb *musb)
+{
+#ifdef CONFIG_PM
+ int i;
+ void __iomem *musb_base;
+ void __iomem *epio;
+
+ if (cpu_is_u5500()) {
+ if (musb != NULL)
+ _musb = musb;
+ else
+ return;
+ }
+
+ musb_base = musb->mregs;
+
+ if (is_host_enabled(musb)) {
+ context.frame = musb_readw(musb_base, MUSB_FRAME);
+ context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
+ context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
+ }
+ context.power = musb_readb(musb_base, MUSB_POWER);
+ context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
+ context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
+ context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
+ context.index = musb_readb(musb_base, MUSB_INDEX);
+ context.devctl = DEFAULT_DEVCTL;
+
+ for (i = 0; i < musb->config->num_eps; ++i) {
+ struct musb_hw_ep *hw_ep;
+
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ hw_ep = &musb->endpoints[i];
+ if (!hw_ep)
+ continue;
+
+ epio = hw_ep->regs;
+ if (!epio)
+ continue;
+
+ context.index_regs[i].txmaxp =
+ musb_readw(epio, MUSB_TXMAXP);
+ context.index_regs[i].txcsr =
+ musb_readw(epio, MUSB_TXCSR);
+ context.index_regs[i].rxmaxp =
+ musb_readw(epio, MUSB_RXMAXP);
+ context.index_regs[i].rxcsr =
+ musb_readw(epio, MUSB_RXCSR);
+
+ if (musb->dyn_fifo) {
+ context.index_regs[i].txfifoadd =
+ musb_read_txfifoadd(musb_base);
+ context.index_regs[i].rxfifoadd =
+ musb_read_rxfifoadd(musb_base);
+ context.index_regs[i].txfifosz =
+ musb_read_txfifosz(musb_base);
+ context.index_regs[i].rxfifosz =
+ musb_read_rxfifosz(musb_base);
+ }
+ if (is_host_enabled(musb)) {
+ context.index_regs[i].txtype =
+ musb_readb(epio, MUSB_TXTYPE);
+ context.index_regs[i].txinterval =
+ musb_readb(epio, MUSB_TXINTERVAL);
+ context.index_regs[i].rxtype =
+ musb_readb(epio, MUSB_RXTYPE);
+ context.index_regs[i].rxinterval =
+ musb_readb(epio, MUSB_RXINTERVAL);
+
+ context.index_regs[i].txfunaddr =
+ musb_read_txfunaddr(musb_base, i);
+ context.index_regs[i].txhubaddr =
+ musb_read_txhubaddr(musb_base, i);
+ context.index_regs[i].txhubport =
+ musb_read_txhubport(musb_base, i);
+
+ context.index_regs[i].rxfunaddr =
+ musb_read_rxfunaddr(musb_base, i);
+ context.index_regs[i].rxhubaddr =
+ musb_read_rxhubaddr(musb_base, i);
+ context.index_regs[i].rxhubport =
+ musb_read_rxhubport(musb_base, i);
+ }
+ }
+ context_stored = true;
+#endif
+}
+
+void ux500_restore_context(struct musb *musb)
+{
+#ifdef CONFIG_PM
+ int i;
+ void __iomem *musb_base;
+ void __iomem *ep_target_regs;
+ void __iomem *epio;
+
+ if (!context_stored)
+ return;
+
+ if (cpu_is_u5500()) {
+ if (_musb != NULL)
+ musb = _musb;
+ else
+ return;
+ }
+
+ musb_base = musb->mregs;
+ if (is_host_enabled(musb)) {
+ musb_writew(musb_base, MUSB_FRAME, context.frame);
+ musb_writeb(musb_base, MUSB_TESTMODE, context.testmode);
+ musb_write_ulpi_buscontrol(musb->mregs, context.busctl);
+ }
+ musb_writeb(musb_base, MUSB_POWER, context.power);
+ musb_writew(musb_base, MUSB_INTRTXE, context.intrtxe);
+ musb_writew(musb_base, MUSB_INTRRXE, context.intrrxe);
+ musb_writeb(musb_base, MUSB_INTRUSBE, context.intrusbe);
+ musb_writeb(musb_base, MUSB_DEVCTL, context.devctl);
+
+ for (i = 0; i < musb->config->num_eps; ++i) {
+ struct musb_hw_ep *hw_ep;
+
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ hw_ep = &musb->endpoints[i];
+ if (!hw_ep)
+ continue;
+
+ epio = hw_ep->regs;
+ if (!epio)
+ continue;
+
+ musb_writew(epio, MUSB_TXMAXP,
+ context.index_regs[i].txmaxp);
+ musb_writew(epio, MUSB_TXCSR,
+ context.index_regs[i].txcsr);
+ musb_writew(epio, MUSB_RXMAXP,
+ context.index_regs[i].rxmaxp);
+ musb_writew(epio, MUSB_RXCSR,
+ context.index_regs[i].rxcsr);
+
+ if (musb->dyn_fifo) {
+ musb_write_txfifosz(musb_base,
+ context.index_regs[i].txfifosz);
+ musb_write_rxfifosz(musb_base,
+ context.index_regs[i].rxfifosz);
+ musb_write_txfifoadd(musb_base,
+ context.index_regs[i].txfifoadd);
+ musb_write_rxfifoadd(musb_base,
+ context.index_regs[i].rxfifoadd);
+ }
+
+ if (is_host_enabled(musb)) {
+ musb_writeb(epio, MUSB_TXTYPE,
+ context.index_regs[i].txtype);
+ musb_writeb(epio, MUSB_TXINTERVAL,
+ context.index_regs[i].txinterval);
+ musb_writeb(epio, MUSB_RXTYPE,
+ context.index_regs[i].rxtype);
+ musb_writeb(epio, MUSB_RXINTERVAL,
+ context.index_regs[i].rxinterval);
+ musb_write_txfunaddr(musb_base, i,
+ context.index_regs[i].txfunaddr);
+ musb_write_txhubaddr(musb_base, i,
+ context.index_regs[i].txhubaddr);
+ musb_write_txhubport(musb_base, i,
+ context.index_regs[i].txhubport);
+
+ ep_target_regs =
+ musb_read_target_reg_base(i, musb_base);
+
+ musb_write_rxfunaddr(ep_target_regs,
+ context.index_regs[i].rxfunaddr);
+ musb_write_rxhubaddr(ep_target_regs,
+ context.index_regs[i].rxhubaddr);
+ musb_write_rxhubport(ep_target_regs,
+ context.index_regs[i].rxhubport);
+ }
+ }
+ musb_writeb(musb_base, MUSB_INDEX, context.index);
+#endif
+}
+
+static void musb_notify_idle(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ unsigned long flags;
+
+ u8 devctl;
+ dev_dbg(musb->controller, "musb_notify_idle %s",
+ otg_state_string(musb->xceiv->state));
+ spin_lock_irqsave(&musb->lock, flags);
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ switch (musb->xceiv->state) {
+ case OTG_STATE_A_WAIT_BCON:
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ if (cpu_is_u8500()) {
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ }
+ break;
+
+ case OTG_STATE_A_SUSPEND:
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+/* blocking notifier support */
+static int musb_otg_notifications(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ struct musb *musb = container_of(nb, struct musb, nb);
+
+ dev_dbg(musb->controller, "musb_otg_notifications %ld %s\n",
+ event, otg_state_string(musb->xceiv->state));
+ switch (event) {
+
+ case USB_EVENT_PREPARE:
+ pm_runtime_get_sync(musb->controller);
+ ux500_restore_context(musb);
+ break;
+ case USB_EVENT_ID:
+ case USB_EVENT_RIDA:
+ dev_dbg(musb->controller, "ID GND\n");
+ if (is_otg_enabled(musb)) {
+ ux500_musb_set_vbus(musb, 1);
+ }
+ break;
+
+ case USB_EVENT_VBUS:
+ dev_dbg(musb->controller, "VBUS Connect\n");
+
+ break;
+/* case USB_EVENT_RIDB: FIXME, not yet managed */
+ case USB_EVENT_NONE:
+ dev_dbg(musb->controller, "VBUS Disconnect\n");
+ if (is_otg_enabled(musb) && musb->is_host)
+ ux500_musb_set_vbus(musb, 0);
+ else
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ break;
+ case USB_EVENT_CLEAN:
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ break;
+ default:
+ dev_dbg(musb->controller, "ID float\n");
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static void ux500_musb_set_vbus(struct musb *musb, int is_on)
+{
+ u8 devctl;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ int ret = 1;
+ struct musb_hdrc_platform_data *plat = musb->controller->platform_data;
+ u8 busctl;
+#ifdef CONFIG_USB_OTG_20
+ int val = 0;
+#endif
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+#ifdef CONFIG_USB_OTG_20
+ val = musb_readb(musb->mregs, MUSB_MISC);
+ val |= 0x1C;
+ musb_writeb(musb->mregs, MUSB_MISC, val);
+#endif
+
+ /* Use EXTVBUS */
+ busctl = musb_read_ulpi_buscontrol(musb->mregs);
+ if (plat->extvbus) {
+ busctl |= MUSB_ULPI_USE_EXTVBUS;
+ musb_write_ulpi_buscontrol(musb->mregs, busctl);
+ } else {
+ busctl &= ~MUSB_ULPI_USE_EXTVBUS;
+ musb_write_ulpi_buscontrol(musb->mregs, busctl);
+ }
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ if (musb->xceiv->state == OTG_STATE_A_IDLE) {
+ /* start the session */
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ /*
+ * Wait for the musb to come up as an A-device so
+ * that VBUS can be enabled
+ */
+ while (musb_readb(musb->mregs, MUSB_DEVCTL)
+ & MUSB_DEVCTL_BDEVICE) {
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(musb->controller,
+ "configured as A device timeout");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ } else {
+ musb->is_active = 1;
+ musb->xceiv->otg->default_a = 1;
+ musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+ MUSB_HST_MODE(musb);
+ }
+ } else {
+ musb->is_active = 0;
+
+ /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
+ * jumping right to B_IDLE...
+ */
+ musb->xceiv->otg->default_a = 0;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ MUSB_DEV_MODE(musb);
+ }
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ dev_dbg(musb->controller, "VBUS %s, devctl %02x "
+ /* otg %3x conf %08x prcm %08x */ "\n",
+ otg_state_string(musb->xceiv->state),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+
+static void ux500_musb_try_idle(struct musb *musb, unsigned long timeout)
+{
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = jiffies + msecs_to_jiffies(3);
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || ((musb->a_wait_bcon == 0)
+ && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
+ dev_dbg(musb->controller, "%s active, deleting timer\n",
+ otg_state_string(musb->xceiv->state));
+ del_timer(&notify_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout)) {
+ if (!timer_pending(&notify_timer))
+ last_timer = timeout;
+ else {
+ dev_dbg(musb->controller, "Longer idle timer "
+ "already pending, ignoring\n");
+ return;
+ }
+ }
+ last_timer = timeout;
+
+ dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
+ otg_state_string(musb->xceiv->state),
+ (unsigned long)jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&notify_timer, timeout);
+}
+
+static void ux500_musb_enable(struct musb *musb)
+{
+ ux500_store_context(musb);
+}
+
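+/*
+ * Editor's note: the helper below returns the first unclaimed
+ * interrupt-capable IN endpoint whose FIFO is not one of the 512-byte
+ * bulk-sized ones; ep->name has the form "epNin"/"epNout", hence the
+ * comparison of name + 3 against "in".
+ */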
+static struct usb_ep *ux500_musb_configure_endpoints(struct musb *musb,
+ u8 type, struct usb_endpoint_descriptor *desc)
+{
+ struct usb_ep *ep = NULL;
+ struct usb_gadget *gadget = &musb->g;
+ char name[4];
+
+ if (USB_ENDPOINT_XFER_INT == type) {
+ list_for_each_entry(ep, &gadget->ep_list, ep_list) {
+ if (ep->maxpacket == 512)
+ continue;
+ if (NULL == ep->driver_data) {
+ strncpy(name, (ep->name + 3), 4);
+ if (USB_DIR_IN & desc->bEndpointAddress)
+ if (strcmp("in", name) == 0)
+ return ep;
+ }
+ }
+ }
+ return ep;
+}
+
static int ux500_musb_init(struct musb *musb)
{
+ int status;
+
musb->xceiv = usb_get_transceiver();
if (!musb->xceiv) {
pr_err("HS USB OTG: no transceiver configured\n");
return -ENODEV;
}
+ pm_runtime_get_noresume(musb->controller);
+ musb->nb.notifier_call = musb_otg_notifications;
+ status = usb_register_notifier(musb->xceiv, &musb->nb);
+
+ if (status < 0) {
+ dev_dbg(musb->controller, "notification register failed\n");
+ goto err1;
+ }
+
+ setup_timer(&notify_timer, musb_notify_idle, (unsigned long) musb);
return 0;
+err1:
+ pm_runtime_disable(musb->controller);
+ return status;
}
+/**
+ * ux500_musb_exit() - unregister the platform USB driver.
+ * @musb: struct musb pointer.
+ *
+ * This function unregisters the USB controller.
+ */
static int ux500_musb_exit(struct musb *musb)
{
usb_put_transceiver(musb->xceiv);
@@ -56,8 +484,21 @@ static int ux500_musb_exit(struct musb *musb)
static const struct musb_platform_ops ux500_ops = {
.init = ux500_musb_init,
.exit = ux500_musb_exit,
+
+ .set_vbus = ux500_musb_set_vbus,
+ .try_idle = ux500_musb_try_idle,
+
+ .enable = ux500_musb_enable,
+ .configure_endpoints = ux500_musb_configure_endpoints,
};
+/**
+ * ux500_probe() - Allocate the resources.
+ * @pdev: struct platform_device.
+ *
+ * This function allocates the required memory for the
+ * structures and registers the musb platform device.
+ */
static int __devinit ux500_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
@@ -122,12 +563,12 @@ static int __devinit ux500_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to register musb device\n");
goto err4;
}
+ pm_runtime_enable(&pdev->dev);
return 0;
-
err4:
- clk_disable(clk);
-
+ if (cpu_is_u5500())
+ clk_disable(clk);
err3:
clk_put(clk);
@@ -147,43 +588,99 @@ static int __devexit ux500_remove(struct platform_device *pdev)
platform_device_del(glue->musb);
platform_device_put(glue->musb);
- clk_disable(glue->clk);
+ if (cpu_is_u5500())
+ clk_disable(glue->clk);
clk_put(glue->clk);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
kfree(glue);
return 0;
}
#ifdef CONFIG_PM
+/**
+ * ux500_suspend() - Handles the platform suspend.
+ * @dev: struct device
+ *
+ * This function gets triggered when the platform
+ * is going to suspend
+ */
static int ux500_suspend(struct device *dev)
{
struct ux500_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
usb_phy_set_suspend(musb->xceiv, 1);
- clk_disable(glue->clk);
+ if (cpu_is_u5500())
+ /*
+ * Since this clock is in the APE domain, it will
+ * automatically be disabled on suspend.
+ * (And enabled on resume automatically.)
+ */
+ clk_disable(glue->clk);
+ dev_dbg(dev, "ux500_suspend\n");
return 0;
}
+/**
+ * ux500_resume() - Handles the platform resume.
+ * @dev: struct device
+ *
+ * This function gets triggered when the platform
+ * is going to resume
+ */
static int ux500_resume(struct device *dev)
{
struct ux500_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
- int ret;
+
+ if (cpu_is_u5500())
+ /* No point in propagating errors on resume */
+ (void) clk_enable(glue->clk);
+ dev_dbg(dev, "ux500_resume\n");
+
+ usb_phy_set_suspend(musb->xceiv, 0);
+
+ return 0;
+}
+#ifdef CONFIG_UX500_SOC_DB8500
+static int ux500_musb_runtime_resume(struct device *dev)
+{
+ struct ux500_glue *glue = dev_get_drvdata(dev);
+ int ret;
+
+ if (cpu_is_u5500())
+ return 0;
ret = clk_enable(glue->clk);
if (ret) {
- dev_err(dev, "failed to enable clock\n");
+ dev_dbg(dev, "Unable to enable clk\n");
return ret;
}
+ dev_dbg(dev, "ux500_musb_runtime_resume\n");
+ return 0;
+}
- usb_phy_set_suspend(musb->xceiv, 0);
+static int ux500_musb_runtime_suspend(struct device *dev)
+{
+ struct ux500_glue *glue = dev_get_drvdata(dev);
+ if (cpu_is_u5500())
+ return 0;
+
+ clk_disable(glue->clk);
+ dev_dbg(dev, "ux500_musb_runtime_suspend\n");
return 0;
}
-
+#endif
static const struct dev_pm_ops ux500_pm_ops = {
+#ifdef CONFIG_UX500_SOC_DB8500
+ SET_RUNTIME_PM_OPS(ux500_musb_runtime_suspend,
+ ux500_musb_runtime_resume, NULL)
+#endif
.suspend = ux500_suspend,
.resume = ux500_resume,
};
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index d05c7fbbb70..7bf0c289ef5 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -32,6 +32,11 @@
#include <linux/pfn.h>
#include <mach/usb.h>
#include "musb_core.h"
+#undef DBG
+#undef WARNING
+#undef INFO
+#include <linux/usb/composite.h>
+#define Ux500_USB_DMA_MIN_TRANSFER_SIZE 512
struct ux500_dma_channel {
struct dma_channel channel;
@@ -64,14 +69,14 @@ void ux500_dma_callback(void *private_data)
struct musb *musb = hw_ep->musb;
unsigned long flags;
- dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n",
+ dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n",
hw_ep->epnum);
spin_lock_irqsave(&musb->lock, flags);
ux500_channel->channel.actual_len = ux500_channel->cur_len;
ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
musb_dma_completion(musb, hw_ep->epnum,
- ux500_channel->is_tx);
+ ux500_channel->is_tx);
spin_unlock_irqrestore(&musb->lock, flags);
}
@@ -134,6 +139,15 @@ static bool ux500_configure_channel(struct dma_channel *channel,
return true;
}
+/**
+ * ux500_dma_channel_allocate() - allocates a DMA channel
+ * @c: pointer to DMA controller
+ * @hw_ep: pointer to endpoint
+ * @is_tx: transmit or receive direction
+ *
+ * This function allocates a DMA channel and initializes it.
+*/
static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
struct musb_hw_ep *hw_ep, u8 is_tx)
{
@@ -172,7 +186,13 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
return &(ux500_channel->channel);
}
-
+/**
+ * ux500_dma_channel_release() - releases the DMA channel
+ * @channel: channel to be released
+ *
+ * This function releases the DMA channel
+ *
+*/
static void ux500_dma_channel_release(struct dma_channel *channel)
{
struct ux500_dma_channel *ux500_channel = channel->private_data;
@@ -190,26 +210,71 @@ static void ux500_dma_channel_release(struct dma_channel *channel)
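+/*
+ * Editor's note: the rewritten ux500_dma_is_compatible() below refuses
+ * DMA for transfers shorter than Ux500_USB_DMA_MIN_TRANSFER_SIZE and
+ * for endpoints belonging to gadget functions that prefer per-packet
+ * CPU handling (cdc_ethernet, rndis, mtp, phonet, adb), found by
+ * scanning the composite device's descriptor lists.
+ */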
static int ux500_dma_is_compatible(struct dma_channel *channel,
u16 maxpacket, void *buf, u32 length)
{
- if ((maxpacket & 0x3) ||
- ((int)buf & 0x3) ||
- (length < 512) ||
- (length & 0x3))
- return false;
- else
- return true;
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
+ struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
+ struct usb_descriptor_header **descriptors;
+ struct usb_function *f;
+ struct usb_gadget *gadget = &musb->g;
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
+
+ if (length < Ux500_USB_DMA_MIN_TRANSFER_SIZE)
+ return 0;
+
+ list_for_each_entry(f, &cdev->config->functions, list) {
+ if (!strcmp(f->name, "cdc_ethernet") ||
+ !strcmp(f->name, "rndis") ||
+ !strcmp(f->name, "mtp") ||
+ !strcmp(f->name, "phonet") ||
+ !strcmp(f->name, "adb")) {
+ if (gadget->speed == USB_SPEED_HIGH)
+ descriptors = f->hs_descriptors;
+ else
+ descriptors = f->descriptors;
+
+ for (; *descriptors; ++descriptors) {
+ struct usb_endpoint_descriptor *ep;
+
+ if ((*descriptors)->bDescriptorType !=
+ USB_DT_ENDPOINT)
+ continue;
+
+ ep = (struct usb_endpoint_descriptor *)
+ *descriptors;
+ if (ep->bEndpointAddress ==
+ ux500_channel->hw_ep->epnum)
+ return 0;
+ }
+ }
+ }
+
+ return 1;
}
+/**
+ * ux500_dma_channel_program() - Configures the channel and initiates transfer
+ * @channel: pointer to DMA channel
+ * @packet_sz: packet size
+ * @mode: mode
+ * @dma_addr: physical address of memory
+ * @len: length
+ *
+ * This function configures the channel and initiates the DMA transfer
+*/
static int ux500_dma_channel_program(struct dma_channel *channel,
u16 packet_sz, u8 mode,
dma_addr_t dma_addr, u32 len)
{
int ret;
+ struct ux500_dma_channel *ux500_dma_channel = channel->private_data;
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
- if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len))
- return false;
+ if (len < Ux500_USB_DMA_MIN_TRANSFER_SIZE)
+ return 0;
+ if (!ux500_dma_channel->is_tx && len < packet_sz)
+ return 0;
channel->status = MUSB_DMA_STATUS_BUSY;
channel->actual_len = 0;
@@ -220,6 +285,12 @@ static int ux500_dma_channel_program(struct dma_channel *channel,
return ret;
}
+/**
+ * ux500_dma_channel_abort() - aborts the DMA transfer
+ * @channel: pointer to DMA channel.
+ *
+ * This function aborts the DMA transfer.
+*/
static int ux500_dma_channel_abort(struct dma_channel *channel)
{
struct ux500_dma_channel *ux500_channel = channel->private_data;
@@ -254,6 +325,12 @@ static int ux500_dma_channel_abort(struct dma_channel *channel)
return 0;
}
+/**
+ * ux500_dma_controller_stop() - releases all the channels and frees the DMA pipes
+ * @c: pointer to DMA controller
+ *
+ * This function frees all of the logical channels and frees the DMA pipes
+*/
static int ux500_dma_controller_stop(struct dma_controller *c)
{
struct ux500_dma_controller *controller = container_of(c,
@@ -285,6 +362,15 @@ static int ux500_dma_controller_stop(struct dma_controller *c)
return 0;
}
+
+/**
+ * ux500_dma_controller_start() - creates the logical channels pool and registers callbacks
+ * @c: pointer to DMA Controller
+ *
+ * This function requests the logical channels from the DMA driver and creates
+ * logical channels based on event lines and also registers the callbacks which
+ * are invoked after data transfer in the transmit or receive direction.
+*/
static int ux500_dma_controller_start(struct dma_controller *c)
{
struct ux500_dma_controller *controller = container_of(c,
@@ -356,6 +442,12 @@ static int ux500_dma_controller_start(struct dma_controller *c)
return 0;
}
+/**
+ * dma_controller_destroy() - deallocates the DMA controller
+ * @c: pointer to dma controller.
+ *
+ * This function deallocates the DMA controller.
+*/
void dma_controller_destroy(struct dma_controller *c)
{
struct ux500_dma_controller *controller = container_of(c,
@@ -364,6 +456,15 @@ void dma_controller_destroy(struct dma_controller *c)
kfree(controller);
}
+/**
+ * dma_controller_create() - creates the DMA controller and initializes callbacks
+ * @musb: pointer to the Mentor core driver data instance
+ * @base: base address of the musb registers.
+ *
+ * This function creates the DMA controller and initializes the callbacks
+ * that are invoked from the Mentor IP core.
+*/
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *base)
{
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 5c87db06b59..168f5b0364f 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -108,6 +108,15 @@ config AB8500_USB
This transceiver supports high and full speed devices plus,
in host mode, low speed.
+config AB5500_USB
+ tristate "AB5500 USB Transceiver Driver"
+ depends on AB5500_CORE
+ select USB_OTG_UTILS
+ help
+ Enable this to support the USB OTG transceiver in AB5500 chip.
+ This transceiver supports high and full speed devices plus,
+ in host mode, low speed.
+
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2 && USB_SUSPEND
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 41aa5098b13..e227d9add96 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_USB_ULPI) += ulpi.o
obj-$(CONFIG_USB_ULPI_VIEWPORT) += ulpi_viewport.o
obj-$(CONFIG_USB_MSM_OTG) += msm_otg.o
obj-$(CONFIG_AB8500_USB) += ab8500-usb.o
+obj-$(CONFIG_AB5500_USB) += ab5500-usb.o
fsl_usb2_otg-objs := fsl_otg.o otg_fsm.o
obj-$(CONFIG_FSL_USB2_OTG) += fsl_usb2_otg.o
obj-$(CONFIG_USB_MV_OTG) += mv_otg.o
diff --git a/drivers/usb/otg/ab5500-usb.c b/drivers/usb/otg/ab5500-usb.c
new file mode 100644
index 00000000000..e8c06e694a6
--- /dev/null
+++ b/drivers/usb/otg/ab5500-usb.c
@@ -0,0 +1,681 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Avinash Kumar <avinash.kumar@stericsson.com> for ST-Ericsson
+ * Author: Ravi Kant SINGH <ravikant.singh@stericsson.com> for ST-Ericsson
+ * Author: Supriya s KARANTH <supriya.karanth@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mfd/abx500/ab5500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <mach/usb.h>
+#include <linux/kernel_stat.h>
+#include <mach/gpio.h>
+#include <mach/reboot_reasons.h>
+#include <linux/pm_qos.h>
+
+/* AB5500 USB macros
+ */
+#define AB5500_MAIN_WATCHDOG_ENABLE 0x1
+#define AB5500_MAIN_WATCHDOG_KICK 0x2
+#define AB5500_MAIN_WATCHDOG_DISABLE 0x0
+#define AB5500_USB_ADP_ENABLE 0x1
+#define AB5500_WATCHDOG_DELAY 10
+#define AB5500_WATCHDOG_DELAY_US 100
+#define AB5500_PHY_DELAY_US 100
+#define AB5500_MAIN_WDOG_CTRL_REG 0x01
+#define AB5500_USB_LINE_STAT_REG 0x80
+#define AB5500_USB_PHY_CTRL_REG 0x8A
+#define AB5500_SYS_CTRL2_BLOCK 0x2
+
+/* UsbLineStatus register bit masks */
+#define AB5500_USB_LINK_STATUS_MASK_V1 0x78
+#define AB5500_USB_LINK_STATUS_MASK_V2 0xF8
+
+#define USB_PROBE_DELAY 1000 /* 1 second */
+#define USB_LIMIT (200) /* If we have more than 200 irqs per second */
+
+static struct pm_qos_request usb_pm_qos_latency;
+static bool usb_pm_qos_is_latency_0;
+
+#define PUBLIC_ID_BACKUPRAM1 (U5500_BACKUPRAM1_BASE + 0x0FC0)
+#define MAX_USB_SERIAL_NUMBER_LEN 31
+
+/* UsbLineStatus register - usb types */
+enum ab5500_usb_link_status {
+ USB_LINK_NOT_CONFIGURED,
+ USB_LINK_STD_HOST_NC,
+ USB_LINK_STD_HOST_C_NS,
+ USB_LINK_STD_HOST_C_S,
+ USB_LINK_HOST_CHG_NM,
+ USB_LINK_HOST_CHG_HS,
+ USB_LINK_HOST_CHG_HS_CHIRP,
+ USB_LINK_DEDICATED_CHG,
+ USB_LINK_ACA_RID_A,
+ USB_LINK_ACA_RID_B,
+ USB_LINK_ACA_RID_C_NM,
+ USB_LINK_ACA_RID_C_HS,
+ USB_LINK_ACA_RID_C_HS_CHIRP,
+ USB_LINK_HM_IDGND,
+ USB_LINK_OTG_HOST_NO_CURRENT,
+ USB_LINK_NOT_VALID_LINK,
+ USB_LINK_PHY_EN_NO_VBUS_NO_IDGND,
+ USB_LINK_STD_UPSTREAM_NO_VBUS_NO_IDGND,
+ USB_LINK_HM_IDGND_V2
+};
+
+/**
+ * ab5500_usb_mode - different states of the AB5500 USB chip
+ *
+ * Used for USB cable plug-in state machine
+ */
+enum ab5500_usb_mode {
+ USB_IDLE,
+ USB_DEVICE,
+ USB_HOST,
+ USB_DEDICATED_CHG,
+};
+struct ab5500_usb {
+ struct otg_transceiver otg;
+ struct device *dev;
+ int irq_num_id_fall;
+ int irq_num_vbus_rise;
+ int irq_num_vbus_fall;
+ int irq_num_link_status;
+ unsigned vbus_draw;
+ struct delayed_work dwork;
+ struct work_struct phy_dis_work;
+ unsigned long link_status_wait;
+ int rev;
+ int usb_cs_gpio;
+ enum ab5500_usb_mode mode;
+ struct clk *sysclk;
+ struct regulator *v_ape;
+ struct abx500_usbgpio_platform_data *usb_gpio;
+ struct delayed_work work_usb_workaround;
+ bool phy_enabled;
+};
+
+static int ab5500_usb_irq_setup(struct platform_device *pdev,
+ struct ab5500_usb *ab);
+static int ab5500_usb_boot_detect(struct ab5500_usb *ab);
+static int ab5500_usb_link_status_update(struct ab5500_usb *ab);
+
+static void ab5500_usb_phy_enable(struct ab5500_usb *ab, bool sel_host);
+static void ab5500_usb_phy_disable(struct ab5500_usb *ab, bool sel_host);
+
+static inline struct ab5500_usb *xceiv_to_ab(struct otg_transceiver *x)
+{
+ return container_of(x, struct ab5500_usb, otg);
+}
+
+/**
+ * ab5500_usb_wd_workaround() - kick the watchdog timer
+ *
+ * This function is used to kick the watchdog timer.
+ */
+static void ab5500_usb_wd_workaround(struct ab5500_usb *ab)
+{
+ abx500_set_register_interruptible(ab->dev,
+ AB5500_SYS_CTRL2_BLOCK,
+ AB5500_MAIN_WDOG_CTRL_REG,
+ AB5500_MAIN_WATCHDOG_ENABLE);
+
+ udelay(AB5500_WATCHDOG_DELAY_US);
+
+ abx500_set_register_interruptible(ab->dev,
+ AB5500_SYS_CTRL2_BLOCK,
+ AB5500_MAIN_WDOG_CTRL_REG,
+ (AB5500_MAIN_WATCHDOG_ENABLE
+ | AB5500_MAIN_WATCHDOG_KICK));
+
+ udelay(AB5500_WATCHDOG_DELAY_US);
+
+ abx500_set_register_interruptible(ab->dev,
+ AB5500_SYS_CTRL2_BLOCK,
+ AB5500_MAIN_WDOG_CTRL_REG,
+ AB5500_MAIN_WATCHDOG_DISABLE);
+
+ udelay(AB5500_WATCHDOG_DELAY_US);
+}
+
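+/*
+ * Editor's note: this worker samples the OTG interrupt count once per
+ * USB_PROBE_DELAY window; if more than USB_LIMIT new interrupts arrived
+ * it pins PM_QOS_CPU_DMA_LATENCY to 0 so heavy USB traffic is not
+ * stalled by deep idle states, and it drops the request again when the
+ * rate falls off.
+ */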
+static void ab5500_usb_load(struct work_struct *work)
+{
+ int cpu;
+ unsigned int num_irqs = 0;
+ static unsigned int old_num_irqs = UINT_MAX;
+ struct delayed_work *work_usb_workaround = to_delayed_work(work);
+ struct ab5500_usb *ab = container_of(work_usb_workaround,
+ struct ab5500_usb, work_usb_workaround);
+
+ for_each_online_cpu(cpu)
+ num_irqs += kstat_irqs_cpu(IRQ_DB5500_USBOTG, cpu);
+
+ if ((num_irqs > old_num_irqs) &&
+ (num_irqs - old_num_irqs) > USB_LIMIT) {
+
+ if (!usb_pm_qos_is_latency_0) {
+
+ pm_qos_add_request(&usb_pm_qos_latency,
+ PM_QOS_CPU_DMA_LATENCY, 0);
+ usb_pm_qos_is_latency_0 = true;
+ }
+ } else {
+
+ if (usb_pm_qos_is_latency_0) {
+
+ pm_qos_remove_request(&usb_pm_qos_latency);
+ usb_pm_qos_is_latency_0 = false;
+ }
+ }
+ old_num_irqs = num_irqs;
+
+ schedule_delayed_work_on(0,
+ &ab->work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+}
+
+static void ab5500_usb_phy_enable(struct ab5500_usb *ab, bool sel_host)
+{
+ int ret = 0;
+ /* Workaround for a spurious interrupt; to be checked with the hardware team */
+ if (ab->phy_enabled == true)
+ return;
+ ab->phy_enabled = true;
+
+ ab->usb_gpio->enable();
+ clk_enable(ab->sysclk);
+ regulator_enable(ab->v_ape);
+
+ /* TODO: Remove ux500_restore_context and handle similarly to ab8500 */
+ ux500_restore_context(NULL);
+ ret = gpio_direction_output(ab->usb_cs_gpio, 0);
+ if (ret < 0) {
+ dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n");
+ gpio_free(ab->usb_cs_gpio);
+ return;
+ }
+ gpio_set_value(ab->usb_cs_gpio, 1);
+ if (sel_host) {
+ schedule_delayed_work_on(0,
+ &ab->work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+ }
+}
+
+static void ab5500_usb_phy_disable(struct ab5500_usb *ab, bool sel_host)
+{
+ /* Workaround for a spurious interrupt; to be checked with the hardware team */
+ if (ab->phy_enabled == false)
+ return;
+ ab->phy_enabled = false;
+
+ /* Needed to disable the phy.*/
+ ab5500_usb_wd_workaround(ab);
+ clk_disable(ab->sysclk);
+ regulator_disable(ab->v_ape);
+ ab->usb_gpio->disable();
+ gpio_set_value(ab->usb_cs_gpio, 0);
+
+ if (sel_host) {
+ if (usb_pm_qos_is_latency_0) {
+
+ pm_qos_remove_request(&usb_pm_qos_latency);
+ usb_pm_qos_is_latency_0 = false;
+ }
+ cancel_delayed_work_sync(&ab->work_usb_workaround);
+ }
+}
+
+#define ab5500_usb_peri_phy_en(ab) ab5500_usb_phy_enable(ab, false)
+#define ab5500_usb_peri_phy_dis(ab) ab5500_usb_phy_disable(ab, false)
+#define ab5500_usb_host_phy_en(ab) ab5500_usb_phy_enable(ab, true)
+#define ab5500_usb_host_phy_dis(ab) ab5500_usb_phy_disable(ab, true)
+
+/* Link status update, run from the IRQ handler and from the delayed work */
+static int ab5500_usb_link_status_update(struct ab5500_usb *ab)
+{
+ u8 val = 0;
+ enum ab5500_usb_link_status lsts;
+ enum usb_xceiv_events event = USB_EVENT_NONE;
+
+ (void)abx500_get_register_interruptible(ab->dev,
+ AB5500_BANK_USB, AB5500_USB_LINE_STAT_REG, &val);
+
+ if (ab->rev >= AB5500_2_0)
+ lsts = (val & AB5500_USB_LINK_STATUS_MASK_V2) >> 3;
+ else
+ lsts = (val & AB5500_USB_LINK_STATUS_MASK_V1) >> 3;
+
+ switch (lsts) {
+
+ case USB_LINK_STD_HOST_NC:
+ case USB_LINK_STD_HOST_C_NS:
+ case USB_LINK_STD_HOST_C_S:
+ case USB_LINK_HOST_CHG_NM:
+ case USB_LINK_HOST_CHG_HS:
+ case USB_LINK_HOST_CHG_HS_CHIRP:
+
+ event = USB_EVENT_VBUS;
+ ab5500_usb_peri_phy_en(ab);
+
+ break;
+ case USB_LINK_DEDICATED_CHG:
+ /* TODO: vbus_draw */
+ event = USB_EVENT_CHARGER;
+ break;
+
+ case USB_LINK_HM_IDGND:
+ if (ab->rev >= AB5500_2_0)
+ return -EINVAL;
+
+ ab5500_usb_host_phy_en(ab);
+
+ ab->otg.default_a = true;
+ event = USB_EVENT_ID;
+
+ break;
+ case USB_LINK_PHY_EN_NO_VBUS_NO_IDGND:
+ ab5500_usb_peri_phy_dis(ab);
+
+ break;
+ case USB_LINK_STD_UPSTREAM_NO_VBUS_NO_IDGND:
+ ab5500_usb_host_phy_dis(ab);
+
+ break;
+
+ case USB_LINK_HM_IDGND_V2:
+ if (ab->rev < AB5500_2_0)
+ return -EINVAL;
+
+ ab5500_usb_host_phy_en(ab);
+
+ ab->otg.default_a = true;
+ event = USB_EVENT_ID;
+
+ break;
+ default:
+ break;
+ }
+
+ atomic_notifier_call_chain(&ab->otg.notifier, event, &ab->vbus_draw);
+
+ return 0;
+}
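+
+/*
+ * Editorial summary of the switch above: the standard-host and
+ * host-charger states enable the peripheral PHY and raise
+ * USB_EVENT_VBUS; the ID-ground states enable the host PHY and raise
+ * USB_EVENT_ID; a dedicated charger only raises USB_EVENT_CHARGER; and
+ * the two "PHY enabled but no VBUS/IDGND" states disable the
+ * corresponding PHY again.
+ */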
+
+static void ab5500_usb_delayed_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ab5500_usb *ab = container_of(dwork, struct ab5500_usb, dwork);
+
+ ab5500_usb_link_status_update(ab);
+}
+
+/**
+ * ab5500_usb_link_status_irq() - handle a USB link status update
+ *
+ * Runs when the USB link status register has been updated.
+ */
+static irqreturn_t ab5500_usb_link_status_irq(int irq, void *data)
+{
+ struct ab5500_usb *ab = (struct ab5500_usb *) data;
+ ab5500_usb_link_status_update(ab);
+
+ return IRQ_HANDLED;
+}
+
+static void ab5500_usb_irq_free(struct ab5500_usb *ab)
+{
+ if (ab->irq_num_link_status)
+ free_irq(ab->irq_num_link_status, ab);
+}
+
+/**
+ * ab5500_usb_irq_setup() - register USB interrupt handlers for ab5500
+ * @pdev: platform device carrying the interrupt resources
+ * @ab: driver state
+ *
+ * Requests the link status update interrupt and kicks the watchdog once.
+ */
+static int ab5500_usb_irq_setup(struct platform_device *pdev,
+ struct ab5500_usb *ab)
+{
+ int ret = 0;
+ int irq, err;
+
+ if (!ab->dev)
+ return -EINVAL;
+
+ irq = platform_get_irq_byname(pdev, "Link_Update");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Link Update irq not found\n");
+ err = irq;
+ goto irq_fail;
+ }
+ ab->irq_num_link_status = irq;
+
+ ret = request_threaded_irq(ab->irq_num_link_status,
+ NULL, ab5500_usb_link_status_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-link-status-update", ab);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to set the callback"
+ " handler for usb charge"
+ " detect done\n");
+ err = ret;
+ goto irq_fail;
+ }
+
+ ab5500_usb_wd_workaround(ab);
+ return 0;
+
+irq_fail:
+ ab5500_usb_irq_free(ab);
+ return err;
+}
+
+/* Sys interfaces */
+static ssize_t
+serial_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 buffer[5];
+ void __iomem *backup_ram;
+
+ backup_ram = ioremap(PUBLIC_ID_BACKUPRAM1, 0x14);
+
+ if (backup_ram) {
+ buffer[0] = readl(backup_ram);
+ buffer[1] = readl(backup_ram + 4);
+ buffer[2] = readl(backup_ram + 8);
+ buffer[3] = readl(backup_ram + 0x0c);
+ buffer[4] = readl(backup_ram + 0x10);
+
+ snprintf(buf, MAX_USB_SERIAL_NUMBER_LEN+1,
+ "%.8X%.8X%.8X%.8X%.8X",
+ buffer[0], buffer[1], buffer[2], buffer[3], buffer[4]);
+
+ iounmap(backup_ram);
+ } else
+ dev_err(dev, "$$\n");
+
+ return strlen(buf);
+}
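+
+/*
+ * Editorial note: reading this attribute yields the five backup-RAM
+ * words as a single 40-character hex string, for example (hypothetical
+ * values):
+ *
+ * $ cat serial_number
+ * 00010203A0A1A2A3B0B1B2B3C0C1C2C3D0D1D2D3
+ */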
+
+static DEVICE_ATTR(serial_number, 0444, serial_number_show, NULL);
+
+static struct attribute *ab5500_usb_attributes[] = {
+ &dev_attr_serial_number.attr,
+ NULL
+};
+static const struct attribute_group ab5500_attr_group = {
+ .attrs = ab5500_usb_attributes,
+};
+
+static int ab5500_create_sysfsentries(struct ab5500_usb *ab)
+{
+ int err;
+
+ err = sysfs_create_group(&ab->dev->kobj, &ab5500_attr_group);
+ if (err)
+ sysfs_remove_group(&ab->dev->kobj, &ab5500_attr_group);
+
+ return err;
+}
+
+/**
+ * ab5500_usb_boot_detect() - detect the USB cable at boot time
+ * @ab: driver state
+ *
+ * Reads the link status register and enables the matching PHY mode.
+ */
+static int ab5500_usb_boot_detect(struct ab5500_usb *ab)
+{
+ u8 usb_status = 0;
+ enum ab5500_usb_link_status lsts;
+ if (!ab->dev)
+ return -EINVAL;
+
+ (void)abx500_get_register_interruptible(ab->dev,
+ AB5500_BANK_USB, AB5500_USB_LINE_STAT_REG, &usb_status);
+
+ if (ab->rev >= AB5500_2_0)
+ lsts = (usb_status & AB5500_USB_LINK_STATUS_MASK_V2) >> 3;
+ else
+ lsts = (usb_status & AB5500_USB_LINK_STATUS_MASK_V1) >> 3;
+
+ switch (lsts) {
+
+ case USB_LINK_STD_HOST_NC:
+ case USB_LINK_STD_HOST_C_NS:
+ case USB_LINK_STD_HOST_C_S:
+ case USB_LINK_HOST_CHG_NM:
+ case USB_LINK_HOST_CHG_HS:
+ case USB_LINK_HOST_CHG_HS_CHIRP:
+ /*
+ * If the power-on key was not pressed, enter charge-only
+ * mode and don't enumerate
+ */
+ if ((!(ab5500_get_turn_on_status() &
+ (P_ON_KEY1_EVENT | P_ON_KEY2_EVENT))) &&
+ (prcmu_get_reset_code() ==
+ SW_RESET_COLDSTART)) {
+ dev_dbg(ab->dev, "USB entered charge only mode");
+ return 0;
+ }
+ ab5500_usb_peri_phy_en(ab);
+
+ break;
+
+ case USB_LINK_HM_IDGND:
+ case USB_LINK_HM_IDGND_V2:
+ ab5500_usb_host_phy_en(ab);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ab5500_usb_set_power(struct otg_transceiver *otg, unsigned mA)
+{
+ struct ab5500_usb *ab;
+
+ if (!otg)
+ return -ENODEV;
+
+ ab = xceiv_to_ab(otg);
+
+ ab->vbus_draw = mA;
+
+ atomic_notifier_call_chain(&ab->otg.notifier,
+ USB_EVENT_VBUS, &ab->vbus_draw);
+ return 0;
+}
+
+static int ab5500_usb_set_suspend(struct otg_transceiver *x, int suspend)
+{
+ /* TODO */
+ return 0;
+}
+
+static int ab5500_usb_set_host(struct otg_transceiver *otg,
+ struct usb_bus *host)
+{
+ struct ab5500_usb *ab;
+
+ if (!otg)
+ return -ENODEV;
+
+ ab = xceiv_to_ab(otg);
+
+ /* Some drivers call this function in atomic context.
+ * Do not update ab5500 registers directly till this
+ * is fixed.
+ */
+
+ if (!host) {
+ ab->otg.host = NULL;
+ schedule_work(&ab->phy_dis_work);
+ } else {
+ ab->otg.host = host;
+ }
+
+ return 0;
+}
+
+static int ab5500_usb_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget)
+{
+ struct ab5500_usb *ab;
+
+ if (!otg)
+ return -ENODEV;
+
+ ab = xceiv_to_ab(otg);
+
+ /* Some drivers call this function in atomic context.
+ * Do not update ab5500 registers directly till this
+ * is fixed.
+ */
+
+ if (!gadget) {
+ ab->otg.gadget = NULL;
+ schedule_work(&ab->phy_dis_work);
+ } else {
+ ab->otg.gadget = gadget;
+ }
+
+ return 0;
+}
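+
+/*
+ * Editorial sketch (illustrative only; the real handler is defined
+ * elsewhere in this driver): phy_dis_work runs in process context, so
+ * it can safely perform the register writes that the atomic callers of
+ * set_host/set_peripheral above must not. The function name below is
+ * hypothetical.
+ */
+#if 0
+static void example_phy_dis_work(struct work_struct *work)
+{
+ struct ab5500_usb *ab = container_of(work, struct ab5500_usb,
+ phy_dis_work);
+
+ /* Disable whichever PHY no longer has a consumer */
+ if (!ab->otg.host)
+ ab5500_usb_phy_disable(ab, true);
+ if (!ab->otg.gadget)
+ ab5500_usb_phy_disable(ab, false);
+}
+#endif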
+
+static int __devinit ab5500_usb_probe(struct platform_device *pdev)
+{
+ struct ab5500_usb *ab;
+ struct abx500_usbgpio_platform_data *usb_pdata =
+ pdev->dev.platform_data;
+ int err;
+ int ret = -1;
+ ab = kzalloc(sizeof *ab, GFP_KERNEL);
+ if (!ab)
+ return -ENOMEM;
+
+ ab->dev = &pdev->dev;
+ ab->otg.dev = ab->dev;
+ ab->otg.label = "ab5500";
+ ab->otg.state = OTG_STATE_B_IDLE;
+ ab->otg.set_host = ab5500_usb_set_host;
+ ab->otg.set_peripheral = ab5500_usb_set_peripheral;
+ ab->otg.set_suspend = ab5500_usb_set_suspend;
+ ab->otg.set_power = ab5500_usb_set_power;
+ ab->usb_gpio = usb_pdata;
+ ab->mode = USB_IDLE;
+
+ platform_set_drvdata(pdev, ab);
+
+ ATOMIC_INIT_NOTIFIER_HEAD(&ab->otg.notifier);
+
+ /* v1: Wait for link status to become stable.
+ * all: Updates from set_host and set_peripheral, which may be atomic.
+ */
+ INIT_DELAYED_WORK(&ab->dwork, ab5500_usb_delayed_work);
+
+ INIT_DELAYED_WORK_DEFERRABLE(&ab->work_usb_workaround,
+ ab5500_usb_load);
+
+ err = otg_set_transceiver(&ab->otg);
+ if (err)
+ dev_err(&pdev->dev, "Can't register transceiver\n");
+
+ ab->usb_cs_gpio = ab->usb_gpio->usb_cs;
+
+ ab->rev = abx500_get_chip_id(ab->dev);
+
+ ab->sysclk = clk_get(ab->dev, "sysclk");
+ if (IS_ERR(ab->sysclk)) {
+ ret = PTR_ERR(ab->sysclk);
+ kfree(ab);
+ return ret;
+ }
+
+ ab->v_ape = regulator_get(ab->dev, "v-ape");
+ if (IS_ERR(ab->v_ape)) {
+ dev_err(ab->dev, "Could not get v-ape supply\n");
+ clk_put(ab->sysclk);
+ kfree(ab);
+ return PTR_ERR(ab->v_ape);
+ }
+
+ ab5500_usb_irq_setup(pdev, ab);
+
+ ret = gpio_request(ab->usb_cs_gpio, "usb-cs");
+ if (ret < 0)
+ dev_err(&pdev->dev, "usb gpio request fail\n");
+
+ /* Acquire the GPIO alternate config struct for USB */
+ err = ab->usb_gpio->get(ab->dev);
+ if (err < 0)
+ goto fail1;
+
+ err = ab5500_usb_boot_detect(ab);
+ if (err < 0)
+ goto fail1;
+
+ err = ab5500_create_sysfsentries(ab);
+ if (err < 0)
+ dev_err(ab->dev, "usb create sysfs entries failed\n");
+
+ return 0;
+
+fail1:
+ ab5500_usb_irq_free(ab);
+ regulator_put(ab->v_ape);
+ clk_put(ab->sysclk);
+ kfree(ab);
+ return err;
+}
+
+static int __devexit ab5500_usb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver ab5500_usb_driver = {
+ .driver = {
+ .name = "ab5500-usb",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab5500_usb_probe,
+ .remove = __devexit_p(ab5500_usb_remove),
+};
+
+static int __init ab5500_usb_init(void)
+{
+ return platform_driver_register(&ab5500_usb_driver);
+}
+subsys_initcall(ab5500_usb_init);
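+
+/*
+ * Editorial note: registering at subsys_initcall level rather than
+ * module_init makes the transceiver available before the USB controller
+ * drivers that consume it are probed; this is an inference from the
+ * initcall level chosen above.
+ */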
+
+static void __exit ab5500_usb_exit(void)
+{
+ platform_driver_unregister(&ab5500_usb_driver);
+}
+module_exit(ab5500_usb_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/otg/ab8500-usb.c b/drivers/usb/otg/ab8500-usb.c
index a84af677dc5..49c5d31c150 100644
--- a/drivers/usb/otg/ab8500-usb.c
+++ b/drivers/usb/otg/ab8500-usb.c
@@ -29,47 +29,117 @@
#include <linux/notifier.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/kernel_stat.h>
+#include <linux/pm_qos.h>
#define AB8500_MAIN_WD_CTRL_REG 0x01
#define AB8500_USB_LINE_STAT_REG 0x80
#define AB8500_USB_PHY_CTRL_REG 0x8A
+#define AB8500_VBUS_CTRL_REG 0x82
+#define AB8500_IT_SOURCE2_REG 0x01
+#define AB8500_IT_SOURCE20_REG 0x13
+#define AB8500_SRC_INT_USB_HOST 0x04
+#define AB8500_SRC_INT_USB_DEVICE 0x80
#define AB8500_BIT_OTG_STAT_ID (1 << 0)
#define AB8500_BIT_PHY_CTRL_HOST_EN (1 << 0)
#define AB8500_BIT_PHY_CTRL_DEVICE_EN (1 << 1)
#define AB8500_BIT_WD_CTRL_ENABLE (1 << 0)
#define AB8500_BIT_WD_CTRL_KICK (1 << 1)
+#define AB8500_BIT_VBUS_ENABLE (1 << 0)
#define AB8500_V1x_LINK_STAT_WAIT (HZ/10)
#define AB8500_WD_KICK_DELAY_US 100 /* usec */
#define AB8500_WD_V11_DISABLE_DELAY_US 100 /* usec */
+#define AB8500_V20_31952_DISABLE_DELAY_US 100 /* usec */
#define AB8500_WD_V10_DISABLE_DELAY_MS 100 /* ms */
+/* Registers in bank 0x11 */
+#define AB8500_BANK12_ACCESS 0x00
+
+/* Registers in bank 0x12 */
+#define AB8500_USB_PHY_TUNE1 0x05
+#define AB8500_USB_PHY_TUNE2 0x06
+#define AB8500_USB_PHY_TUNE3 0x07
+
+static struct pm_qos_request usb_pm_qos_latency;
+static bool usb_pm_qos_is_latency_0;
+
+#define USB_PROBE_DELAY 1000 /* 1 second */
+#define USB_LIMIT (200) /* If we have more than 200 irqs per second */
+
+#define PUBLIC_ID_BACKUPRAM1 (U8500_BACKUPRAM1_BASE + 0x0FC0)
+#define MAX_USB_SERIAL_NUMBER_LEN 31
+#define AB8505_USB_LINE_STAT_REG 0x94
+
/* Usb line status register */
enum ab8500_usb_link_status {
- USB_LINK_NOT_CONFIGURED = 0,
- USB_LINK_STD_HOST_NC,
- USB_LINK_STD_HOST_C_NS,
- USB_LINK_STD_HOST_C_S,
- USB_LINK_HOST_CHG_NM,
- USB_LINK_HOST_CHG_HS,
- USB_LINK_HOST_CHG_HS_CHIRP,
- USB_LINK_DEDICATED_CHG,
- USB_LINK_ACA_RID_A,
- USB_LINK_ACA_RID_B,
- USB_LINK_ACA_RID_C_NM,
- USB_LINK_ACA_RID_C_HS,
- USB_LINK_ACA_RID_C_HS_CHIRP,
- USB_LINK_HM_IDGND,
- USB_LINK_RESERVED,
- USB_LINK_NOT_VALID_LINK
+ USB_LINK_NOT_CONFIGURED_8500 = 0,
+ USB_LINK_STD_HOST_NC_8500,
+ USB_LINK_STD_HOST_C_NS_8500,
+ USB_LINK_STD_HOST_C_S_8500,
+ USB_LINK_HOST_CHG_NM_8500,
+ USB_LINK_HOST_CHG_HS_8500,
+ USB_LINK_HOST_CHG_HS_CHIRP_8500,
+ USB_LINK_DEDICATED_CHG_8500,
+ USB_LINK_ACA_RID_A_8500,
+ USB_LINK_ACA_RID_B_8500,
+ USB_LINK_ACA_RID_C_NM_8500,
+ USB_LINK_ACA_RID_C_HS_8500,
+ USB_LINK_ACA_RID_C_HS_CHIRP_8500,
+ USB_LINK_HM_IDGND_8500,
+ USB_LINK_RESERVED_8500,
+ USB_LINK_NOT_VALID_LINK_8500,
+};
+
+enum ab8505_usb_link_status {
+ USB_LINK_NOT_CONFIGURED_8505 = 0,
+ USB_LINK_STD_HOST_NC_8505,
+ USB_LINK_STD_HOST_C_NS_8505,
+ USB_LINK_STD_HOST_C_S_8505,
+ USB_LINK_CDP_8505,
+ USB_LINK_RESERVED0_8505,
+ USB_LINK_RESERVED1_8505,
+ USB_LINK_DEDICATED_CHG_8505,
+ USB_LINK_ACA_RID_A_8505,
+ USB_LINK_ACA_RID_B_8505,
+ USB_LINK_ACA_RID_C_NM_8505,
+ USB_LINK_RESERVED2_8505,
+ USB_LINK_RESERVED3_8505,
+ USB_LINK_HM_IDGND_8505,
+ USB_LINK_CHARGERPORT_NOT_OK_8505,
+ USB_LINK_CHARGER_DM_HIGH_8505,
+ USB_LINK_PHYEN_NO_VBUS_NO_IDGND_8505,
+ USB_LINK_STD_UPSTREAM_NO_IDGNG_NO_VBUS_8505,
+ USB_LINK_STD_UPSTREAM_8505,
+ USB_LINK_CHARGER_SE1_8505,
+ USB_LINK_CARKIT_CHGR_1_8505,
+ USB_LINK_CARKIT_CHGR_2_8505,
+ USB_LINK_ACA_DOCK_CHGR_8505,
+ USB_LINK_SAMSUNG_BOOT_CBL_PHY_EN_8505,
+ USB_LINK_SAMSUNG_BOOT_CBL_PHY_DISB_8505,
+ USB_LINK_SAMSUNG_UART_CBL_PHY_EN_8505,
+ USB_LINK_SAMSUNG_UART_CBL_PHY_DISB_8505,
+ USB_LINK_MOTOROLA_FACTORY_CBL_PHY_EN_8505,
+};
+
+enum ab8500_usb_mode {
+ USB_IDLE = 0,
+ USB_PERIPHERAL,
+ USB_HOST,
+ USB_DEDICATED_CHG
};
struct ab8500_usb {
struct usb_phy phy;
struct device *dev;
+ struct ab8500 *ab8500;
int irq_num_id_rise;
int irq_num_id_fall;
int irq_num_vbus_rise;
@@ -79,7 +149,13 @@ struct ab8500_usb {
struct delayed_work dwork;
struct work_struct phy_dis_work;
unsigned long link_status_wait;
- int rev;
+ enum ab8500_usb_mode mode;
+ struct clk *sysclk;
+ struct regulator *v_ape;
+ struct regulator *v_musb;
+ struct regulator *v_ulpi;
+ struct delayed_work work_usb_workaround;
+ bool sysfs_flag;
};
static inline struct ab8500_usb *phy_to_ab(struct usb_phy *x)
@@ -102,10 +178,8 @@ static void ab8500_usb_wd_workaround(struct ab8500_usb *ab)
(AB8500_BIT_WD_CTRL_ENABLE
| AB8500_BIT_WD_CTRL_KICK));
- if (ab->rev > 0x10) /* v1.1 v2.0 */
+ if (!is_ab8500_1p0_or_earlier(ab->ab8500))
udelay(AB8500_WD_V11_DISABLE_DELAY_US);
- else /* v1.0 */
- msleep(AB8500_WD_V10_DISABLE_DELAY_MS);
abx500_set_register_interruptible(ab->dev,
AB8500_SYS_CTRL2_BLOCK,
@@ -113,146 +187,412 @@ static void ab8500_usb_wd_workaround(struct ab8500_usb *ab)
0);
}
-static void ab8500_usb_phy_ctrl(struct ab8500_usb *ab, bool sel_host,
+static void ab8500_usb_load(struct work_struct *work)
+{
+ int cpu;
+ unsigned int num_irqs = 0;
+ static unsigned int old_num_irqs = UINT_MAX;
+ struct delayed_work *work_usb_workaround = to_delayed_work(work);
+ struct ab8500_usb *ab = container_of(work_usb_workaround,
+ struct ab8500_usb, work_usb_workaround);
+
+ for_each_online_cpu(cpu)
+ num_irqs += kstat_irqs_cpu(IRQ_DB8500_USBOTG, cpu);
+
+ if ((num_irqs > old_num_irqs) &&
+ (num_irqs - old_num_irqs) > USB_LIMIT) {
+
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "usb", 125);
+ if (!usb_pm_qos_is_latency_0) {
+
+ pm_qos_add_request(&usb_pm_qos_latency,
+ PM_QOS_CPU_DMA_LATENCY, 0);
+ usb_pm_qos_is_latency_0 = true;
+ }
+ } else {
+
+ if (usb_pm_qos_is_latency_0) {
+
+ pm_qos_remove_request(&usb_pm_qos_latency);
+ usb_pm_qos_is_latency_0 = false;
+ }
+
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "usb", 25);
+ }
+ old_num_irqs = num_irqs;
+
+ schedule_delayed_work_on(0,
+ &ab->work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+}
+
+static void ab8500_usb_regulator_ctrl(struct ab8500_usb *ab, bool sel_host,
bool enable)
{
- u8 ctrl_reg;
- abx500_get_register_interruptible(ab->dev,
- AB8500_USB,
- AB8500_USB_PHY_CTRL_REG,
- &ctrl_reg);
- if (sel_host) {
- if (enable)
- ctrl_reg |= AB8500_BIT_PHY_CTRL_HOST_EN;
- else
- ctrl_reg &= ~AB8500_BIT_PHY_CTRL_HOST_EN;
+ int ret = 0, volt = 0;
+
+ if (enable) {
+ regulator_enable(ab->v_ape);
+ if (!is_ab8500_2p0_or_earlier(ab->ab8500)) {
+ ret = regulator_set_voltage(ab->v_ulpi,
+ 1300000, 1350000);
+ if (ret < 0)
+ dev_err(ab->dev, "Failed to set the Vintcore"
+ " to 1.3V, ret=%d\n", ret);
+ ret = regulator_set_optimum_mode(ab->v_ulpi,
+ 28000);
+ if (ret < 0)
+ dev_err(ab->dev, "Failed to set optimum mode"
+ " (ret=%d)\n", ret);
+
+ }
+ regulator_enable(ab->v_ulpi);
+ if (!is_ab8500_2p0_or_earlier(ab->ab8500)) {
+ volt = regulator_get_voltage(ab->v_ulpi);
+ if ((volt != 1300000) && (volt != 1350000))
+ dev_err(ab->dev, "Vintcore is not"
+ " set to 1.3V"
+ " volt=%d\n", volt);
+ }
+ regulator_enable(ab->v_musb);
+
} else {
- if (enable)
- ctrl_reg |= AB8500_BIT_PHY_CTRL_DEVICE_EN;
- else
- ctrl_reg &= ~AB8500_BIT_PHY_CTRL_DEVICE_EN;
+ regulator_disable(ab->v_musb);
+ regulator_disable(ab->v_ulpi);
+ regulator_disable(ab->v_ape);
}
+}
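+
+/*
+ * Editorial note: the supplies are released in the reverse order of how
+ * they were enabled (musb, ulpi, ape), mirroring the enable path above.
+ */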
- abx500_set_register_interruptible(ab->dev,
+
+static void ab8500_usb_phy_enable(struct ab8500_usb *ab, bool sel_host)
+{
+ u8 bit;
+ bit = sel_host ? AB8500_BIT_PHY_CTRL_HOST_EN :
+ AB8500_BIT_PHY_CTRL_DEVICE_EN;
+
+ clk_enable(ab->sysclk);
+
+ ab8500_usb_regulator_ctrl(ab, sel_host, true);
+
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
+ (char *)dev_name(ab->dev), 100);
+
+ schedule_delayed_work_on(0,
+ &ab->work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
AB8500_USB,
AB8500_USB_PHY_CTRL_REG,
- ctrl_reg);
+ bit,
+ bit);
- /* Needed to enable the phy.*/
- if (enable)
- ab8500_usb_wd_workaround(ab);
}
-#define ab8500_usb_host_phy_en(ab) ab8500_usb_phy_ctrl(ab, true, true)
-#define ab8500_usb_host_phy_dis(ab) ab8500_usb_phy_ctrl(ab, true, false)
-#define ab8500_usb_peri_phy_en(ab) ab8500_usb_phy_ctrl(ab, false, true)
-#define ab8500_usb_peri_phy_dis(ab) ab8500_usb_phy_ctrl(ab, false, false)
+static void ab8500_usb_wd_linkstatus(struct ab8500_usb *ab, u8 bit)
+{
+ /* Workaround for v2.0 bug # 31952 */
+ if (is_ab8500_2p0(ab->ab8500)) {
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ bit,
+ bit);
+ udelay(AB8500_V20_31952_DISABLE_DELAY_US);
+ }
+}
-static int ab8500_usb_link_status_update(struct ab8500_usb *ab)
+static void ab8500_usb_phy_disable(struct ab8500_usb *ab, bool sel_host)
{
- u8 reg;
- enum ab8500_usb_link_status lsts;
- void *v = NULL;
- enum usb_phy_events event;
+ u8 bit;
+ bit = sel_host ? AB8500_BIT_PHY_CTRL_HOST_EN :
+ AB8500_BIT_PHY_CTRL_DEVICE_EN;
+
+ ab8500_usb_wd_linkstatus(ab, bit);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ bit,
+ 0);
+
+ /* Needed to disable the phy.*/
+ ab8500_usb_wd_workaround(ab);
+
+ clk_disable(ab->sysclk);
+
+ ab8500_usb_regulator_ctrl(ab, sel_host, false);
+
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
+ (char *)dev_name(ab->dev), 50);
+
+ if (!sel_host) {
+
+ cancel_delayed_work_sync(&ab->work_usb_workaround);
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "usb", 25);
+ }
+}
+
+#define ab8500_usb_host_phy_en(ab) ab8500_usb_phy_enable(ab, true)
+#define ab8500_usb_host_phy_dis(ab) ab8500_usb_phy_disable(ab, true)
+#define ab8500_usb_peri_phy_en(ab) ab8500_usb_phy_enable(ab, false)
+#define ab8500_usb_peri_phy_dis(ab) ab8500_usb_phy_disable(ab, false)
- abx500_get_register_interruptible(ab->dev,
- AB8500_USB,
- AB8500_USB_LINE_STAT_REG,
- &reg);
+static int ab8505_usb_link_status_update(struct ab8500_usb *ab,
+ enum ab8505_usb_link_status lsts)
+{
+ enum usb_phy_events event = USB_EVENT_NONE;
- lsts = (reg >> 3) & 0x0F;
+ dev_dbg(ab->dev, "ab8505_usb_link_status_update %d\n", lsts);
switch (lsts) {
- case USB_LINK_NOT_CONFIGURED:
- case USB_LINK_RESERVED:
- case USB_LINK_NOT_VALID_LINK:
- /* TODO: Disable regulators. */
- ab8500_usb_host_phy_dis(ab);
- ab8500_usb_peri_phy_dis(ab);
- ab->phy.state = OTG_STATE_B_IDLE;
+ case USB_LINK_ACA_RID_B_8505:
+ event = USB_EVENT_RIDB;
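+ /* Fall through */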
+ case USB_LINK_NOT_CONFIGURED_8505:
+ case USB_LINK_RESERVED0_8505:
+ case USB_LINK_RESERVED1_8505:
+ case USB_LINK_RESERVED2_8505:
+ case USB_LINK_RESERVED3_8505:
+ if (ab->mode == USB_PERIPHERAL)
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_CLEAN,
+ &ab->vbus_draw);
+ ab->mode = USB_IDLE;
ab->phy.otg->default_a = false;
ab->vbus_draw = 0;
- event = USB_EVENT_NONE;
+ if (event != USB_EVENT_RIDB)
+ event = USB_EVENT_NONE;
break;
- case USB_LINK_STD_HOST_NC:
- case USB_LINK_STD_HOST_C_NS:
- case USB_LINK_STD_HOST_C_S:
- case USB_LINK_HOST_CHG_NM:
- case USB_LINK_HOST_CHG_HS:
- case USB_LINK_HOST_CHG_HS_CHIRP:
- if (ab->phy.otg->gadget) {
- /* TODO: Enable regulators. */
+ case USB_LINK_ACA_RID_C_NM_8505:
+ event = USB_EVENT_RIDC;
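+ /* Fall through */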
+ case USB_LINK_STD_HOST_NC_8505:
+ case USB_LINK_STD_HOST_C_NS_8505:
+ case USB_LINK_STD_HOST_C_S_8505:
+ if (ab->mode == USB_HOST) {
+ ab->mode = USB_PERIPHERAL;
+ ab8500_usb_host_phy_dis(ab);
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_CLEAN,
+ &ab->vbus_draw);
ab8500_usb_peri_phy_en(ab);
- v = ab->phy.otg->gadget;
}
- event = USB_EVENT_VBUS;
+ if (ab->mode == USB_IDLE) {
+ ab->mode = USB_PERIPHERAL;
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_PREPARE,
+ &ab->vbus_draw);
+ ab8500_usb_peri_phy_en(ab);
+ }
+ if (event != USB_EVENT_RIDC)
+ event = USB_EVENT_VBUS;
break;
-
- case USB_LINK_HM_IDGND:
- if (ab->phy.otg->host) {
- /* TODO: Enable regulators. */
+ case USB_LINK_ACA_RID_A_8505:
+ event = USB_EVENT_RIDA;
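+ /* Fall through */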
+ case USB_LINK_HM_IDGND_8505:
+ if (ab->mode == USB_PERIPHERAL) {
+ ab->mode = USB_HOST;
+ ab8500_usb_peri_phy_dis(ab);
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_PREPARE,
+ &ab->vbus_draw);
+ ab8500_usb_host_phy_en(ab);
+ }
+ if (ab->mode == USB_IDLE) {
+ ab->mode = USB_HOST;
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_PREPARE,
+ &ab->vbus_draw);
ab8500_usb_host_phy_en(ab);
- v = ab->phy.otg->host;
}
- ab->phy.state = OTG_STATE_A_IDLE;
ab->phy.otg->default_a = true;
- event = USB_EVENT_ID;
+ if (event != USB_EVENT_RIDA)
+ event = USB_EVENT_ID;
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ event,
+ &ab->vbus_draw);
break;
- case USB_LINK_ACA_RID_A:
- case USB_LINK_ACA_RID_B:
- /* TODO */
- case USB_LINK_ACA_RID_C_NM:
- case USB_LINK_ACA_RID_C_HS:
- case USB_LINK_ACA_RID_C_HS_CHIRP:
- case USB_LINK_DEDICATED_CHG:
- /* TODO: vbus_draw */
- event = USB_EVENT_CHARGER;
+ case USB_LINK_DEDICATED_CHG_8505:
+ ab->mode = USB_DEDICATED_CHG;
+ event = USB_EVENT_CHARGER;
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ event,
+ &ab->vbus_draw);
break;
- }
-
- atomic_notifier_call_chain(&ab->phy.notifier, event, v);
+ default:
+ break;
+ }
return 0;
}
-static void ab8500_usb_delayed_work(struct work_struct *work)
+static int ab8500_usb_link_status_update(struct ab8500_usb *ab,
+ enum ab8500_usb_link_status lsts)
{
- struct ab8500_usb *ab = container_of(work, struct ab8500_usb,
- dwork.work);
+ enum usb_phy_events event = USB_EVENT_NONE;
- ab8500_usb_link_status_update(ab);
+ dev_dbg(ab->dev, "ab8500_usb_link_status_update %d\n", lsts);
+
+ switch (lsts) {
+ case USB_LINK_ACA_RID_B_8500:
+ event = USB_EVENT_RIDB;
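+ /* Fall through */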
+ case USB_LINK_NOT_CONFIGURED_8500:
+ case USB_LINK_RESERVED_8500:
+ case USB_LINK_NOT_VALID_LINK_8500:
+ if (ab->mode == USB_PERIPHERAL)
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_CLEAN,
+ &ab->vbus_draw);
+ ab->mode = USB_IDLE;
+ ab->phy.otg->default_a = false;
+ ab->vbus_draw = 0;
+ if (event != USB_EVENT_RIDB)
+ event = USB_EVENT_NONE;
+ break;
+ case USB_LINK_ACA_RID_C_NM_8500:
+ case USB_LINK_ACA_RID_C_HS_8500:
+ case USB_LINK_ACA_RID_C_HS_CHIRP_8500:
+ event = USB_EVENT_RIDC;
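+ /* Fall through */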
+ case USB_LINK_STD_HOST_NC_8500:
+ case USB_LINK_STD_HOST_C_NS_8500:
+ case USB_LINK_STD_HOST_C_S_8500:
+ case USB_LINK_HOST_CHG_NM_8500:
+ case USB_LINK_HOST_CHG_HS_8500:
+ case USB_LINK_HOST_CHG_HS_CHIRP_8500:
+ if (ab->mode == USB_HOST) {
+ ab->mode = USB_PERIPHERAL;
+ ab8500_usb_host_phy_dis(ab);
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_PREPARE,
+ &ab->vbus_draw);
+ ab8500_usb_peri_phy_en(ab);
+ }
+ if (ab->mode == USB_IDLE) {
+ ab->mode = USB_PERIPHERAL;
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_PREPARE,
+ &ab->vbus_draw);
+ ab8500_usb_peri_phy_en(ab);
+ }
+ if (event != USB_EVENT_RIDC)
+ event = USB_EVENT_VBUS;
+ break;
+
+ case USB_LINK_ACA_RID_A_8500:
+ event = USB_EVENT_RIDA;
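+ /* Fall through */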
+ case USB_LINK_HM_IDGND_8500:
+ if (ab->mode == USB_PERIPHERAL) {
+ ab->mode = USB_HOST;
+ ab8500_usb_peri_phy_dis(ab);
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_PREPARE,
+ &ab->vbus_draw);
+ ab8500_usb_host_phy_en(ab);
+ }
+ if (ab->mode == USB_IDLE) {
+ ab->mode = USB_HOST;
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_PREPARE,
+ &ab->vbus_draw);
+ ab8500_usb_host_phy_en(ab);
+ }
+ ab->phy.otg->default_a = true;
+ if (event != USB_EVENT_RIDA)
+ event = USB_EVENT_ID;
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ event,
+ &ab->vbus_draw);
+ break;
+
+ case USB_LINK_DEDICATED_CHG_8500:
+ ab->mode = USB_DEDICATED_CHG;
+ event = USB_EVENT_CHARGER;
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ event,
+ &ab->vbus_draw);
+ break;
+ }
+ return 0;
}
-static irqreturn_t ab8500_usb_v1x_common_irq(int irq, void *data)
+static int abx500_usb_link_status_update(struct ab8500_usb *ab)
{
- struct ab8500_usb *ab = (struct ab8500_usb *) data;
+ u8 reg;
+ int ret = 0;
- /* Wait for link status to become stable. */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
+ if (!(ab->sysfs_flag)) {
+ if (is_ab8500(ab->ab8500)) {
+ enum ab8500_usb_link_status lsts;
- return IRQ_HANDLED;
+ abx500_get_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_LINE_STAT_REG,
+ &reg);
+ lsts = (reg >> 3) & 0x0F;
+ ret = ab8500_usb_link_status_update(ab, lsts);
+ }
+ if (is_ab8505(ab->ab8500)) {
+ enum ab8505_usb_link_status lsts;
+
+ abx500_get_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8505_USB_LINE_STAT_REG,
+ &reg);
+ lsts = (reg >> 3) & 0x1F;
+ ret = ab8505_usb_link_status_update(ab, lsts);
+ }
+ }
+ return ret;
+}
+
+static void ab8500_usb_delayed_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ab8500_usb *ab = container_of(dwork, struct ab8500_usb, dwork);
+ abx500_usb_link_status_update(ab);
}
-static irqreturn_t ab8500_usb_v1x_vbus_fall_irq(int irq, void *data)
+static irqreturn_t ab8500_usb_disconnect_irq(int irq, void *data)
{
struct ab8500_usb *ab = (struct ab8500_usb *) data;
+ enum usb_phy_events event = USB_EVENT_NONE;
/* Link status will not be updated till phy is disabled. */
- ab8500_usb_peri_phy_dis(ab);
-
- /* Wait for link status to become stable. */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
+ if (ab->mode == USB_HOST) {
+ ab->phy.otg->default_a = false;
+ ab->vbus_draw = 0;
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ event, &ab->vbus_draw);
+ ab8500_usb_host_phy_dis(ab);
+ }
+ if (ab->mode == USB_PERIPHERAL) {
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ event, &ab->vbus_draw);
+ ab8500_usb_peri_phy_dis(ab);
+ }
+ if (is_ab8500_2p0(ab->ab8500)) {
+ if (ab->mode == USB_DEDICATED_CHG) {
+ ab8500_usb_wd_linkstatus(ab, AB8500_BIT_PHY_CTRL_DEVICE_EN);
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN,
+ 0);
+ }
+ }
return IRQ_HANDLED;
}
-static irqreturn_t ab8500_usb_v20_irq(int irq, void *data)
+static irqreturn_t ab8500_usb_v20_link_status_irq(int irq, void *data)
{
struct ab8500_usb *ab = (struct ab8500_usb *) data;
- ab8500_usb_link_status_update(ab);
+ abx500_usb_link_status_update(ab);
return IRQ_HANDLED;
}
@@ -267,8 +607,42 @@ static void ab8500_usb_phy_disable_work(struct work_struct *work)
if (!ab->phy.otg->gadget)
ab8500_usb_peri_phy_dis(ab);
+
}
+static unsigned ab8500_eyediagram_workaround(struct ab8500_usb *ab, unsigned mA)
+{
+ /* AB8500 v2 has eye diagram issues when drawing more than
+ * 100 mA from VBUS, so limit the charging current to 100 mA
+ * for standard hosts.
+ */
+ if (is_ab8500_2p0_or_earlier(ab->ab8500))
+ if (mA > 100)
+ mA = 100;
+
+ return mA;
+}
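+
+/* Editorial example: on AB8500 v2.0 or earlier, a gadget request of
+ * 500 mA is clamped to 100 mA by the helper above.
+ */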
+
+#ifdef CONFIG_USB_OTG_20
+static int ab8500_usb_start_srp(struct usb_phy *phy, unsigned mA)
+{
+ struct ab8500_usb *ab;
+
+ if (!phy)
+ return -ENODEV;
+
+ ab = phy_to_ab(phy);
+
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_PREPARE,
+ &ab->vbus_draw);
+
+ ab8500_usb_peri_phy_en(ab);
+
+ return 0;
+}
+#endif
+
static int ab8500_usb_set_power(struct usb_phy *phy, unsigned mA)
{
struct ab8500_usb *ab;
@@ -278,18 +652,15 @@ static int ab8500_usb_set_power(struct usb_phy *phy, unsigned mA)
ab = phy_to_ab(phy);
+ mA = ab8500_eyediagram_workaround(ab, mA);
+
ab->vbus_draw = mA;
- if (mA)
- atomic_notifier_call_chain(&ab->phy.notifier,
- USB_EVENT_ENUMERATED, ab->phy.otg->gadget);
+ atomic_notifier_call_chain(&ab->phy.notifier,
+ USB_EVENT_VBUS, &ab->vbus_draw);
return 0;
}
-/* TODO: Implement some way for charging or other drivers to read
- * ab->vbus_draw.
- */
-
static int ab8500_usb_set_suspend(struct usb_phy *x, int suspend)
{
/* TODO */
@@ -306,25 +677,13 @@ static int ab8500_usb_set_peripheral(struct usb_otg *otg,
ab = phy_to_ab(otg->phy);
+ ab->phy.otg->gadget = gadget;
/* Some drivers call this function in atomic context.
* Do not update ab8500 registers directly till this
* is fixed.
*/
-
- if (!gadget) {
- /* TODO: Disable regulators. */
- otg->gadget = NULL;
+ if (!gadget)
schedule_work(&ab->phy_dis_work);
- } else {
- otg->gadget = gadget;
- otg->phy->state = OTG_STATE_B_IDLE;
-
- /* Phy will not be enabled if cable is already
- * plugged-in. Schedule to enable phy.
- * Use same delay to avoid any race condition.
- */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
- }
return 0;
}
@@ -338,22 +697,93 @@ static int ab8500_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
ab = phy_to_ab(otg->phy);
+ ab->phy.otg->host = host;
+
/* Some drivers call this function in atomic context.
* Do not update ab8500 registers directly till this
* is fixed.
*/
-
- if (!host) {
- /* TODO: Disable regulators. */
- otg->host = NULL;
+ if (!host)
schedule_work(&ab->phy_dis_work);
- } else {
- otg->host = host;
- /* Phy will not be enabled if cable is already
- * plugged-in. Schedule to enable phy.
- * Use same delay to avoid any race condition.
- */
- schedule_delayed_work(&ab->dwork, ab->link_status_wait);
+
+ return 0;
+}
+/**
+ * ab8500_usb_boot_detect() - detect the USB cable at boot time
+ * @ab: driver state
+ *
+ * Toggles the device and host PHY enable bits once so that a cable
+ * already plugged in at boot is detected.
+ */
+static int ab8500_usb_boot_detect(struct ab8500_usb *ab)
+{
+ /* Disabling PHY before selective enable or disable */
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN);
+
+ udelay(100);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_DEVICE_EN,
+ 0);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_HOST_EN,
+ AB8500_BIT_PHY_CTRL_HOST_EN);
+
+ udelay(100);
+
+ abx500_mask_and_set_register_interruptible(ab->dev,
+ AB8500_USB,
+ AB8500_USB_PHY_CTRL_REG,
+ AB8500_BIT_PHY_CTRL_HOST_EN,
+ 0);
+
+ return 0;
+}
+
+static void ab8500_usb_regulator_put(struct ab8500_usb *ab)
+{
+
+ if (ab->v_ape)
+ regulator_put(ab->v_ape);
+
+ if (ab->v_ulpi)
+ regulator_put(ab->v_ulpi);
+
+ if (ab->v_musb)
+ regulator_put(ab->v_musb);
+}
+
+static int ab8500_usb_regulator_get(struct ab8500_usb *ab)
+{
+ int err;
+
+ ab->v_ape = regulator_get(ab->dev, "v-ape");
+ if (IS_ERR(ab->v_ape)) {
+ dev_err(ab->dev, "Could not get v-ape supply\n");
+ err = PTR_ERR(ab->v_ape);
+ return err;
+ }
+
+ ab->v_ulpi = regulator_get(ab->dev, "vddulpivio18");
+ if (IS_ERR(ab->v_ulpi)) {
+ dev_err(ab->dev, "Could not get vddulpivio18 supply\n");
+ err = PTR_ERR(ab->v_ulpi);
+ return err;
+ }
+
+ ab->v_musb = regulator_get(ab->dev, "musb_1v8");
+ if (IS_ERR(ab->v_musb)) {
+ dev_err(ab->dev, "Could not get musb_1v8 supply\n");
+ err = PTR_ERR(ab->v_musb);
+ return err;
}
return 0;
@@ -361,126 +791,179 @@ static int ab8500_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
static void ab8500_usb_irq_free(struct ab8500_usb *ab)
{
- if (ab->rev < 0x20) {
+ if (ab->irq_num_id_rise)
free_irq(ab->irq_num_id_rise, ab);
+
+ if (ab->irq_num_id_fall)
free_irq(ab->irq_num_id_fall, ab);
+
+ if (ab->irq_num_vbus_rise)
free_irq(ab->irq_num_vbus_rise, ab);
+
+ if (ab->irq_num_vbus_fall)
free_irq(ab->irq_num_vbus_fall, ab);
- } else {
+
+ if (ab->irq_num_link_status)
free_irq(ab->irq_num_link_status, ab);
- }
}
-static int ab8500_usb_v1x_res_setup(struct platform_device *pdev,
+static int ab8500_usb_irq_setup(struct platform_device *pdev,
struct ab8500_usb *ab)
{
int err;
+ int irq;
+
+ if (!is_ab8500_1p0_or_earlier(ab->ab8500)) {
+ irq = platform_get_irq_byname(pdev, "USB_LINK_STATUS");
+ if (irq < 0) {
+ err = irq;
+ dev_err(&pdev->dev, "Link status irq not found\n");
+ goto irq_fail;
+ }
- ab->irq_num_id_rise = platform_get_irq_byname(pdev, "ID_WAKEUP_R");
- if (ab->irq_num_id_rise < 0) {
- dev_err(&pdev->dev, "ID rise irq not found\n");
- return ab->irq_num_id_rise;
- }
- err = request_threaded_irq(ab->irq_num_id_rise, NULL,
- ab8500_usb_v1x_common_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
- "usb-id-rise", ab);
- if (err < 0) {
- dev_err(ab->dev, "request_irq failed for ID rise irq\n");
- goto fail0;
+ err = request_threaded_irq(irq, NULL,
+ ab8500_usb_v20_link_status_irq,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "usb-link-status", ab);
+ if (err < 0) {
+ dev_err(ab->dev,
+ "request_irq failed for link status irq\n");
+ return err;
+ }
+ ab->irq_num_link_status = irq;
}
- ab->irq_num_id_fall = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
- if (ab->irq_num_id_fall < 0) {
+ irq = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
+ if (irq < 0) {
+ err = irq;
dev_err(&pdev->dev, "ID fall irq not found\n");
- return ab->irq_num_id_fall;
+ goto irq_fail;
}
- err = request_threaded_irq(ab->irq_num_id_fall, NULL,
- ab8500_usb_v1x_common_irq,
+ err = request_threaded_irq(irq, NULL,
+ ab8500_usb_disconnect_irq,
IRQF_NO_SUSPEND | IRQF_SHARED,
"usb-id-fall", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for ID fall irq\n");
- goto fail1;
+ goto irq_fail;
}
+ ab->irq_num_id_fall = irq;
- ab->irq_num_vbus_rise = platform_get_irq_byname(pdev, "VBUS_DET_R");
- if (ab->irq_num_vbus_rise < 0) {
- dev_err(&pdev->dev, "VBUS rise irq not found\n");
- return ab->irq_num_vbus_rise;
- }
- err = request_threaded_irq(ab->irq_num_vbus_rise, NULL,
- ab8500_usb_v1x_common_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
- "usb-vbus-rise", ab);
- if (err < 0) {
- dev_err(ab->dev, "request_irq failed for Vbus rise irq\n");
- goto fail2;
- }
-
- ab->irq_num_vbus_fall = platform_get_irq_byname(pdev, "VBUS_DET_F");
- if (ab->irq_num_vbus_fall < 0) {
+ irq = platform_get_irq_byname(pdev, "VBUS_DET_F");
+ if (irq < 0) {
+ err = irq;
dev_err(&pdev->dev, "VBUS fall irq not found\n");
- return ab->irq_num_vbus_fall;
+ goto irq_fail;
}
- err = request_threaded_irq(ab->irq_num_vbus_fall, NULL,
- ab8500_usb_v1x_vbus_fall_irq,
+ err = request_threaded_irq(irq, NULL,
+ ab8500_usb_disconnect_irq,
IRQF_NO_SUSPEND | IRQF_SHARED,
"usb-vbus-fall", ab);
if (err < 0) {
dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
- goto fail3;
+ goto irq_fail;
}
+ ab->irq_num_vbus_fall = irq;
return 0;
-fail3:
- free_irq(ab->irq_num_vbus_rise, ab);
-fail2:
- free_irq(ab->irq_num_id_fall, ab);
-fail1:
- free_irq(ab->irq_num_id_rise, ab);
-fail0:
+
+irq_fail:
+ ab8500_usb_irq_free(ab);
return err;
}
-static int ab8500_usb_v2_res_setup(struct platform_device *pdev,
- struct ab8500_usb *ab)
+/* Sys interfaces */
+static ssize_t
+serial_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- int err;
+ u32 buffer[5];
+ void __iomem *backup_ram = NULL;
- ab->irq_num_link_status = platform_get_irq_byname(pdev,
- "USB_LINK_STATUS");
- if (ab->irq_num_link_status < 0) {
- dev_err(&pdev->dev, "Link status irq not found\n");
- return ab->irq_num_link_status;
- }
+ backup_ram = ioremap(PUBLIC_ID_BACKUPRAM1, 0x14);
- err = request_threaded_irq(ab->irq_num_link_status, NULL,
- ab8500_usb_v20_irq,
- IRQF_NO_SUSPEND | IRQF_SHARED,
- "usb-link-status", ab);
- if (err < 0) {
- dev_err(ab->dev,
- "request_irq failed for link status irq\n");
- return err;
- }
+ if (backup_ram) {
+ buffer[0] = readl(backup_ram);
+ buffer[1] = readl(backup_ram + 4);
+ buffer[2] = readl(backup_ram + 8);
+ buffer[3] = readl(backup_ram + 0x0c);
+ buffer[4] = readl(backup_ram + 0x10);
- return 0;
+ snprintf(buf, MAX_USB_SERIAL_NUMBER_LEN+1,
+ "%.8X%.8X%.8X%.8X%.8X",
+ buffer[0], buffer[1], buffer[2], buffer[3], buffer[4]);
+
+ iounmap(backup_ram);
+ } else
+ dev_err(dev, "$$\n");
+
+ return strlen(buf);
+}
+
+static DEVICE_ATTR(serial_number, 0444, serial_number_show, NULL);
+
+static ssize_t
+boot_time_device_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ab8500_usb *ab = dev_get_drvdata(dev);
+ u8 val = ab->sysfs_flag;
+
+ snprintf(buf, 2, "%d", val);
+
+ return strlen(buf);
+}
+
+static ssize_t
+boot_time_device_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct ab8500_usb *ab = dev_get_drvdata(dev);
+
+ ab->sysfs_flag = false;
+
+ abx500_usb_link_status_update(ab);
+
+ return n;
+}
+static DEVICE_ATTR(boot_time_device, 0644,
+ boot_time_device_show, boot_time_device_store);
+
+static struct attribute *ab8500_usb_attributes[] = {
+ &dev_attr_serial_number.attr,
+ &dev_attr_boot_time_device.attr,
+ NULL
+};
+static const struct attribute_group ab8500_attr_group = {
+ .attrs = ab8500_usb_attributes,
+};
+
+static int ab8500_create_sysfsentries(struct ab8500_usb *ab)
+{
+ int err;
+
+ err = sysfs_create_group(&ab->dev->kobj, &ab8500_attr_group);
+ if (err)
+ sysfs_remove_group(&ab->dev->kobj, &ab8500_attr_group);
+
+ return err;
}
static int __devinit ab8500_usb_probe(struct platform_device *pdev)
{
struct ab8500_usb *ab;
+ struct ab8500 *ab8500;
struct usb_otg *otg;
int err;
int rev;
+ int ret = -1;
+ ab8500 = dev_get_drvdata(pdev->dev.parent);
rev = abx500_get_chip_id(&pdev->dev);
- if (rev < 0) {
- dev_err(&pdev->dev, "Chip id read failed\n");
- return rev;
- } else if (rev < 0x10) {
- dev_err(&pdev->dev, "Unsupported AB8500 chip\n");
+
+ if (is_ab8500_1p1_or_earlier(ab8500)) {
+ dev_err(&pdev->dev, "Unsupported AB8500 chip rev=%d\n", rev);
return -ENODEV;
}
@@ -495,20 +978,24 @@ static int __devinit ab8500_usb_probe(struct platform_device *pdev)
}
ab->dev = &pdev->dev;
- ab->rev = rev;
+ ab->ab8500 = ab8500;
ab->phy.dev = ab->dev;
ab->phy.otg = otg;
ab->phy.label = "ab8500";
ab->phy.set_suspend = ab8500_usb_set_suspend;
ab->phy.set_power = ab8500_usb_set_power;
- ab->phy.state = OTG_STATE_UNDEFINED;
+ ab->phy.state = OTG_STATE_B_IDLE;
otg->phy = &ab->phy;
otg->set_host = ab8500_usb_set_host;
otg->set_peripheral = ab8500_usb_set_peripheral;
+#ifdef CONFIG_USB_OTG_20
+ otg->start_srp = ab8500_usb_start_srp;
+#endif
+ ab->sysfs_flag = true;
platform_set_drvdata(pdev, ab);
-
+ dev_set_drvdata(ab->dev, ab);
ATOMIC_INIT_NOTIFIER_HEAD(&ab->phy.notifier);
/* v1: Wait for link status to become stable.
@@ -519,27 +1006,98 @@ static int __devinit ab8500_usb_probe(struct platform_device *pdev)
/* all: Disable phy when called from set_host and set_peripheral */
INIT_WORK(&ab->phy_dis_work, ab8500_usb_phy_disable_work);
- if (ab->rev < 0x20) {
- err = ab8500_usb_v1x_res_setup(pdev, ab);
- ab->link_status_wait = AB8500_V1x_LINK_STAT_WAIT;
- } else {
- err = ab8500_usb_v2_res_setup(pdev, ab);
+ INIT_DELAYED_WORK_DEFERRABLE(&ab->work_usb_workaround,
+ ab8500_usb_load);
+ err = ab8500_usb_regulator_get(ab);
+ if (err)
+ goto fail0;
+
+ ab->sysclk = clk_get(ab->dev, "sysclk");
+ if (IS_ERR(ab->sysclk)) {
+ err = PTR_ERR(ab->sysclk);
+ goto fail1;
}
+ err = ab8500_usb_irq_setup(pdev, ab);
if (err < 0)
- goto fail0;
+ goto fail2;
err = usb_set_transceiver(&ab->phy);
if (err) {
dev_err(&pdev->dev, "Can't register transceiver\n");
- goto fail1;
+ goto fail3;
}
- dev_info(&pdev->dev, "AB8500 usb driver initialized\n");
+ /* Write Phy tuning values */
+ if (!is_ab8500_2p0_or_earlier(ab->ab8500)) {
+ /* Enable the PBT/Bank 0x12 access */
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEVELOPMENT,
+ AB8500_BANK12_ACCESS,
+ 0x01);
+ if (ret < 0)
+ printk(KERN_ERR "Failed to enable bank12"
+ " access ret=%d\n", ret);
+
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEBUG,
+ AB8500_USB_PHY_TUNE1,
+ 0xC8);
+ if (ret < 0)
+ printk(KERN_ERR "Failed to set PHY_TUNE1"
+ " register ret=%d\n", ret);
+
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEBUG,
+ AB8500_USB_PHY_TUNE2,
+ 0x00);
+ if (ret < 0)
+ printk(KERN_ERR "Failed to set PHY_TUNE2"
+ " register ret=%d\n", ret);
+
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEBUG,
+ AB8500_USB_PHY_TUNE3,
+ 0x78);
+
+ if (ret < 0)
+ printk(KERN_ERR "Failed to set PHY_TUNE3"
+ " regester ret=%d\n", ret);
+
+ /* Switch to normal mode/disable Bank 0x12 access */
+ ret = abx500_set_register_interruptible(ab->dev,
+ AB8500_DEVELOPMENT,
+ AB8500_BANK12_ACCESS,
+ 0x00);
+
+ if (ret < 0)
+ printk(KERN_ERR "Failed to switch bank12"
+ " access ret=%d\n", ret);
+ }
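+
+ /*
+ * Editorial sketch (illustrative only, hypothetical table name):
+ * the five writes above form a fixed {bank, reg, value} sequence,
+ *
+ * static const struct { u8 bank, reg, val; } tune_seq[] = {
+ * { AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x01 },
+ * { AB8500_DEBUG, AB8500_USB_PHY_TUNE1, 0xC8 },
+ * { AB8500_DEBUG, AB8500_USB_PHY_TUNE2, 0x00 },
+ * { AB8500_DEBUG, AB8500_USB_PHY_TUNE3, 0x78 },
+ * { AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x00 },
+ * };
+ *
+ * which could be walked with one
+ * abx500_set_register_interruptible() call per entry.
+ */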
+ /* Needed to enable ID detection. */
+ ab8500_usb_wd_workaround(ab);
+
+ prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
+ (char *)dev_name(ab->dev), 50);
+ dev_info(&pdev->dev, "revision 0x%2x driver initialized\n", rev);
+
+ prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "usb", 25);
+
+ err = ab8500_usb_boot_detect(ab);
+ if (err < 0)
+ goto fail3;
+
+ err = ab8500_create_sysfsentries(ab);
+ if (err)
+ goto fail3;
return 0;
-fail1:
+fail3:
ab8500_usb_irq_free(ab);
+fail2:
+ clk_put(ab->sysclk);
+fail1:
+ ab8500_usb_regulator_put(ab);
fail0:
kfree(otg);
kfree(ab);
@@ -558,8 +1116,14 @@ static int __devexit ab8500_usb_remove(struct platform_device *pdev)
usb_set_transceiver(NULL);
- ab8500_usb_host_phy_dis(ab);
- ab8500_usb_peri_phy_dis(ab);
+ if (ab->mode == USB_HOST)
+ ab8500_usb_host_phy_dis(ab);
+ else if (ab->mode == USB_PERIPHERAL)
+ ab8500_usb_peri_phy_dis(ab);
+
+ clk_put(ab->sysclk);
+
+ ab8500_usb_regulator_put(ab);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index a290be51a1f..c775ac2419d 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -23,6 +23,8 @@ source "drivers/gpu/drm/Kconfig"
source "drivers/gpu/stub/Kconfig"
+source "drivers/gpu/mali/Kconfig"
+
config VGASTATE
tristate
default n
@@ -286,6 +288,8 @@ config FB_CIRRUS
Say N unless you have such a graphics board or plan to get one
before you next recompile the kernel.
+source "drivers/video/mcde/Kconfig"
+
config FB_PM2
tristate "Permedia2 support"
depends on FB && ((AMIGA && BROKEN) || PCI)
@@ -2415,6 +2419,8 @@ source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
source "drivers/video/exynos/Kconfig"
source "drivers/video/backlight/Kconfig"
+source "drivers/video/av8100/Kconfig"
+source "drivers/video/b2r2/Kconfig"
if VT
source "drivers/video/console/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 9356add945b..3a4ad2ebe33 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -139,6 +139,9 @@ obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o
obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
obj-$(CONFIG_FB_OMAP) += omap/
obj-y += omap2/
+obj-$(CONFIG_FB_MCDE) += mcde/
+obj-$(CONFIG_AV8100) += av8100/
+obj-y += b2r2/
obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
diff --git a/drivers/video/av8100/Kconfig b/drivers/video/av8100/Kconfig
new file mode 100644
index 00000000000..40b9943aaa9
--- /dev/null
+++ b/drivers/video/av8100/Kconfig
@@ -0,0 +1,48 @@
+config AV8100
+ tristate "AV8100 driver support(HDMI/CVBS)"
+ default n
+ help
+ Say Y or M here if HDMI/TV-out driver support is required.
+
+config HDMI_AV8100_DEBUG
+ bool "HDMI and AV8100 debug messages"
+ default n
+ depends on AV8100
+ ---help---
+ Say Y here if you want the HDMI and AV8100 driver to
+ output debug messages.
+
+choice
+ prompt "AV8100 HW trig method"
+ default AV8100_HWTRIG_DSI_TE
+
+config AV8100_HWTRIG_INT
+ bool "AV8100 HW trig on INT"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use HW triggering
+ from AV8100 INT to MCDE sync0.
+
+config AV8100_HWTRIG_I2SDAT3
+ bool "AV8100 HW trig on I2SDAT3"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use HW triggering
+ from AV8100 I2SDAT3 to MCDE sync1.
+
+config AV8100_HWTRIG_DSI_TE
+ bool "AV8100 HW trig on DSI"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use HW triggering
+ using DSI TE polling between AV8100 and MCDE.
+
+config AV8100_HWTRIG_NONE
+ bool "AV8100 SW trig"
+ depends on AV8100
+ ---help---
+ If you say Y here AV8100 will use SW triggering
+ between AV8100 and MCDE.
+
+endchoice
+
diff --git a/drivers/video/av8100/Makefile b/drivers/video/av8100/Makefile
new file mode 100644
index 00000000000..2d3028b18ca
--- /dev/null
+++ b/drivers/video/av8100/Makefile
@@ -0,0 +1,10 @@
+# Makefile for the AV8100 HDMI driver (also buildable as a module)
+
+obj-$(CONFIG_AV8100) += av8100.o hdmi.o
+
+ifdef CONFIG_HDMI_AV8100_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
+
+clean-files := av8100.o hdmi.o built-in.o modules.order
+
diff --git a/drivers/video/av8100/av8100.c b/drivers/video/av8100/av8100.c
new file mode 100644
index 00000000000..d5d159079c3
--- /dev/null
+++ b/drivers/video/av8100/av8100.c
@@ -0,0 +1,4166 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * AV8100 driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/timer.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include "av8100_regs.h"
+#include <video/av8100.h>
+#include <video/hdmi.h>
+#include <linux/firmware.h>
+
+#define AV8100_FW_FILENAME "av8100.fw"
+#define CUT_STR_0 "2.1"
+#define CUT_STR_1 "2.2"
+#define CUT_STR_3 "2.3"
+#define CUT_STR_30 "3.0"
+#define CUT_STR_UNKNOWN ""
+#define AV8100_DEVNR_DEFAULT 0
+
+/* Interrupts */
+#define AV8100_INT_EVENT 0x1
+#define AV8100_PLUGSTARTUP_EVENT 0x4
+
+#define AV8100_PLUGSTARTUP_TIME 100
+
+/* Standby search time */
+#define AV8100_ON_TIME 1 /* 9 ms step */
+#define AV8100_DENC_OFF_TIME 3 /* 275 ms step if > V1. Not used if V1 */
+#define AV8100_HDMI_OFF_TIME 2 /* 140 ms step if V2. 80 ms step if V1 */
+
+/* Command offsets */
+#define AV8100_COMMAND_OFFSET 0x10
+#define AV8100_CUTVER_OFFSET 0x11
+#define AV8100_COMMAND_MAX_LENGTH 0x81
+#define AV8100_CMD_BUF_OFFSET (AV8100_COMMAND_OFFSET + 1)
+#define AV8100_2ND_RET_BYTE_OFFSET (AV8100_COMMAND_OFFSET + 1)
+#define AV8100_CEC_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 4)
+#define AV8100_HDCP_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 2)
+#define AV8100_EDID_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 1)
+#define AV8100_FUSE_CRC_OFFSET (AV8100_COMMAND_OFFSET + 2)
+#define AV8100_FUSE_PRGD_OFFSET (AV8100_COMMAND_OFFSET + 3)
+#define AV8100_CRC32_OFFSET (AV8100_COMMAND_OFFSET + 2)
+#define AV8100_CEC_ADDR_OFFSET (AV8100_COMMAND_OFFSET + 3)
+
+/* Tearing effect line numbers */
+#define AV8100_TE_LINE_NB_14 14
+#define AV8100_TE_LINE_NB_17 17
+#define AV8100_TE_LINE_NB_18 18
+#define AV8100_TE_LINE_NB_21 21
+#define AV8100_TE_LINE_NB_22 22
+#define AV8100_TE_LINE_NB_24 24
+#define AV8100_TE_LINE_NB_25 25
+#define AV8100_TE_LINE_NB_26 26
+#define AV8100_TE_LINE_NB_29 29
+#define AV8100_TE_LINE_NB_30 30
+#define AV8100_TE_LINE_NB_32 32
+#define AV8100_TE_LINE_NB_38 38
+#define AV8100_TE_LINE_NB_40 40
+#define AV8100_UI_X4_DEFAULT 6
+
+#define HDMI_REQUEST_FOR_REVOCATION_LIST_INPUT 2
+#define HDMI_CEC_MESSAGE_WRITE_BUFFER_SIZE 16
+#define HDMI_HDCP_SEND_KEY_SIZE 7
+#define HDMI_INFOFRAME_DATA_SIZE 28
+#define HDMI_FUSE_AES_KEY_SIZE 16
+#define HDMI_FUSE_AES_KEY_RET_SIZE 2
+#define HDMI_LOADAES_END_BLK_NR 145
+#define HDMI_CRC32_SIZE 4
+#define HDMI_HDCP_MGMT_BKSV_SIZE 5
+#define HDMI_HDCP_MGMT_SHA_SIZE 20
+#define HDMI_HDCP_MGMT_MAX_DEVICES_SIZE 20
+#define HDMI_HDCP_MGMT_DEVICE_MASK 0x7F
+#define HDMI_EDIDREAD_SIZE 0x7F
+
+#define HPDS_INVALID 0xF
+#define CPDS_INVALID 0xF
+#define CECRX_INVALID 0xF
+
+#define REG_16_8_LSB(p) ((u8)(p & 0xFF))
+#define REG_16_8_MSB(p) ((u8)((p & 0xFF00)>>8))
+#define REG_32_8_MSB(p) ((u8)((p & 0xFF000000)>>24))
+#define REG_32_8_MMSB(p) ((u8)((p & 0x00FF0000)>>16))
+#define REG_32_8_MLSB(p) ((u8)((p & 0x0000FF00)>>8))
+#define REG_32_8_LSB(p) ((u8)(p & 0x000000FF))
+#define REG_10_8_MSB(p) ((u8)((p & 0x300)>>8))
+#define REG_12_8_MSB(p) ((u8)((p & 0xf00)>>8))
+
+#define AV8100_WAITTIME_1MS 1
+#define AV8100_WAITTIME_5MS 5
+#define AV8100_WAITTIME_10MS 10
+#define AV8100_WAITTIME_50MS 50
+#define AV8100_WATTIME_100US 100
+
+static DEFINE_MUTEX(av8100_hw_mutex);
+#define LOCK_AV8100_HW mutex_lock(&av8100_hw_mutex)
+#define UNLOCK_AV8100_HW mutex_unlock(&av8100_hw_mutex)
+static DEFINE_MUTEX(av8100_fwdl_mutex);
+#define LOCK_AV8100_FWDL mutex_lock(&av8100_fwdl_mutex)
+#define UNLOCK_AV8100_FWDL mutex_unlock(&av8100_fwdl_mutex)
+
+struct color_conversion_cmd {
+ unsigned short c0;
+ unsigned short c1;
+ unsigned short c2;
+ unsigned short c3;
+ unsigned short c4;
+ unsigned short c5;
+ unsigned short c6;
+ unsigned short c7;
+ unsigned short c8;
+ unsigned short aoffset;
+ unsigned short boffset;
+ unsigned short coffset;
+ unsigned char lmax;
+ unsigned char lmin;
+ unsigned char cmax;
+ unsigned char cmin;
+};
+
+struct av8100_config {
+ struct i2c_client *client;
+ struct i2c_device_id *id;
+ struct av8100_video_input_format_cmd hdmi_video_input_cmd;
+ struct av8100_audio_input_format_cmd hdmi_audio_input_cmd;
+ struct av8100_video_output_format_cmd hdmi_video_output_cmd;
+ struct av8100_video_scaling_format_cmd hdmi_video_scaling_cmd;
+ enum av8100_color_transform color_transform;
+ struct av8100_cec_message_write_format_cmd
+ hdmi_cec_message_write_cmd;
+ struct av8100_cec_message_read_back_format_cmd
+ hdmi_cec_message_read_back_cmd;
+ struct av8100_denc_format_cmd hdmi_denc_cmd;
+ struct av8100_hdmi_cmd hdmi_cmd;
+ struct av8100_hdcp_send_key_format_cmd hdmi_hdcp_send_key_cmd;
+ struct av8100_hdcp_management_format_cmd
+ hdmi_hdcp_management_format_cmd;
+ struct av8100_infoframes_format_cmd hdmi_infoframes_cmd;
+ struct av8100_edid_section_readback_format_cmd
+ hdmi_edid_section_readback_cmd;
+ struct av8100_pattern_generator_format_cmd hdmi_pattern_generator_cmd;
+ struct av8100_fuse_aes_key_format_cmd hdmi_fuse_aes_key_cmd;
+};
+
+enum av8100_plug_state {
+ AV8100_UNPLUGGED,
+ AV8100_PLUGGED_STARTUP,
+ AV8100_PLUGGED
+};
+
+struct av8100_params {
+ int denc_off_time; /* 5 volt time */
+ int hdmi_off_time; /* 5 volt time */
+ int on_time; /* 5 volt time */
+ u8 hpdm; /* stby_int_mask */
+ u8 cpdm; /* stby_int_mask */
+ u8 cecm; /* gen_int_mask */
+ u8 hdcpm; /* gen_int_mask */
+ u8 uovbm; /* gen_int_mask */
+ void (*hdmi_ev_cb)(enum av8100_hdmi_event);
+ enum av8100_plug_state plug_state;
+ struct clk *inputclk;
+ bool inputclk_requested;
+ bool opp_requested;
+ struct regulator *regulator_pwr;
+ bool regulator_requested;
+ bool pre_suspend_power;
+ bool ints_enabled;
+ bool irq_requested;
+};
+
+/**
+ * struct av8100_cea - CEA (Consumer Electronics Association) mode timings
+ * @cea_id: human-readable mode name
+ * @cea_nb: CEA/VESA mode number
+ * @vtotale: total number of vertical lines
+ */
+
+struct av8100_cea {
+ char cea_id[40];
+ int cea_nb;
+ int vtotale;
+ int vactive;
+ int vsbp;
+ int vslen;
+ int vsfp;
+ char vpol[5];
+ int htotale;
+ int hactive;
+ int hbp;
+ int hslen;
+ int hfp;
+ int frequence;
+ char hpol[5];
+ int reg_line_duration;
+ int blkoel_duration;
+ int uix4;
+ int pll_mult;
+ int pll_div;
+};
+
+enum av8100_command_size {
+ AV8100_COMMAND_VIDEO_INPUT_FORMAT_SIZE = 0x17,
+ AV8100_COMMAND_AUDIO_INPUT_FORMAT_SIZE = 0x8,
+ AV8100_COMMAND_VIDEO_OUTPUT_FORMAT_SIZE = 0x1E,
+ AV8100_COMMAND_VIDEO_SCALING_FORMAT_SIZE = 0x11,
+ AV8100_COMMAND_COLORSPACECONVERSION_SIZE = 0x1D,
+ AV8100_COMMAND_CEC_MESSAGE_WRITE_SIZE = 0x12,
+ AV8100_COMMAND_CEC_MESSAGE_READ_BACK_SIZE = 0x1,
+ AV8100_COMMAND_DENC_SIZE = 0x6,
+ AV8100_COMMAND_HDMI_SIZE = 0x4,
+ AV8100_COMMAND_HDCP_SENDKEY_SIZE = 0xA,
+ AV8100_COMMAND_HDCP_MANAGEMENT_SIZE = 0x3,
+ AV8100_COMMAND_INFOFRAMES_SIZE = 0x21,
+ AV8100_COMMAND_EDID_SECTION_READBACK_SIZE = 0x3,
+ AV8100_COMMAND_PATTERNGENERATOR_SIZE = 0x4,
+ AV8100_COMMAND_FUSE_AES_KEY_SIZE = 0x12,
+ AV8100_COMMAND_FUSE_AES_CHK_SIZE = 0x2,
+};
+
+struct av8100_device {
+ struct list_head list;
+ struct miscdevice miscdev;
+ struct device *dev;
+ struct av8100_config config;
+ struct av8100_status status;
+ struct timer_list timer;
+ wait_queue_head_t event;
+ int flag;
+ struct av8100_params params;
+ u8 chip_version;
+};
+
+static const unsigned int waittime_retry[10] = {
+ 1, 2, 4, 6, 8, 10, 10, 10, 10, 10};
+
+static int av8100_5V_w(u8 denc_off, u8 hdmi_off, u8 on);
+static void clr_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status);
+static void set_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status);
+static void cec_rx(struct av8100_device *adev);
+static void cec_tx(struct av8100_device *adev);
+static void cec_txerr(struct av8100_device *adev);
+static void hdcp_changed(struct av8100_device *adev);
+static const struct color_conversion_cmd *get_color_transform_cmd(
+ struct av8100_device *adev,
+ enum av8100_color_transform transform);
+static int av8100_open(struct inode *inode, struct file *filp);
+static int av8100_release(struct inode *inode, struct file *filp);
+static long av8100_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+static int __devinit av8100_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id);
+static int __devexit av8100_remove(struct i2c_client *i2c_client);
+
+static const struct file_operations av8100_fops = {
+ .owner = THIS_MODULE,
+ .open = av8100_open,
+ .release = av8100_release,
+ .unlocked_ioctl = av8100_ioctl
+};
+
+/* List of devices */
+static LIST_HEAD(av8100_device_list);
+
+static const struct av8100_cea av8100_all_cea[29] = {
+/* cea id
+ * cea_nr vtot vact vsbpp vslen
+ * vsfp vpol htot hact hbp hslen hfp freq
+ * hpol rld bd uix4 pm pd */
+{ "0 CUSTOM ",
+ 0, 0, 0, 0, 0,
+ 0, "-", 800, 640, 16, 96, 10, 25200000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be defined*/
+{ "1 CEA 1 VESA 4 640x480p @ 60 Hz ",
+ 1, 525, 480, 33, 2,
+ 10, "-", 800, 640, 49, 290, 146, 25200000,
+ "-", 2438, 1270, 6, 32, 1},/*RGB888*/
+{ "2 CEA 2 - 3 720x480p @ 60 Hz 4:3 ",
+ 2, 525, 480, 30, 6,
+ 9, "-", 858, 720, 34, 130, 128, 27027000,
+ "-", 1828, 0x3C0, 8, 24, 1},/*RGB565*/
+{ "3 CEA 4 1280x720p @ 60 Hz ",
+ 4, 750, 720, 20, 5,
+ 5, "+", 1650, 1280, 114, 39, 228, 74250000,
+ "+", 1706, 164, 6, 32, 1},/*RGB565*/
+{ "4 CEA 5 1920x1080i @ 60 Hz ",
+ 5, 1125, 540, 20, 5,
+ 0, "+", 2200, 1920, 88, 44, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "5 CEA 6-7 480i (NTSC) ",
+ 6, 525, 240, 44, 5,
+ 0, "-", 858, 720, 12, 64, 10, 13513513,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "6 CEA 14-15 480p @ 60 Hz ",
+ 14, 525, 480, 44, 5,
+ 0, "-", 858, 720, 12, 64, 10, 27027000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "7 CEA 16 1920x1080p @ 60 Hz ",
+ 16, 1125, 1080, 36, 5,
+ 0, "+", 1980, 1280, 440, 40, 10, 133650000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "8 CEA 17-18 720x576p @ 50 Hz ",
+ 17, 625, 576, 44, 5,
+ 0, "-", 864, 720, 12, 64, 10, 27000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "9 CEA 19 1280x720p @ 50 Hz ",
+ 19, 750, 720, 25, 5,
+ 0, "+", 1980, 1280, 440, 40, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "10 CEA 20 1920 x 1080i @ 50 Hz ",
+ 20, 1125, 540, 20, 5,
+ 0, "+", 2640, 1920, 528, 44, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "11 CEA 21-22 576i (PAL) ",
+ 21, 625, 288, 44, 5,
+ 0, "-", 1728, 1440, 12, 64, 10, 27000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "12 CEA 29/30 576p ",
+ 29, 625, 576, 44, 5,
+ 0, "-", 864, 720, 12, 64, 10, 27000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "13 CEA 31 1080p 50Hz ",
+ 31, 1125, 1080, 44, 5,
+ 0, "-", 2640, 1920, 12, 64, 10, 148500000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "14 CEA 32 1920x1080p @ 24 Hz ",
+ 32, 1125, 1080, 36, 5,
+ 4, "+", 2750, 1920, 660, 44, 153, 74250000,
+ "+", 2844, 0x530, 6, 32, 1},/*RGB565*/
+{ "15 CEA 33 1920x1080p @ 25 Hz ",
+ 33, 1125, 1080, 36, 5,
+ 4, "+", 2640, 1920, 528, 44, 10, 74250000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "16 CEA 34 1920x1080p @ 30Hz ",
+ 34, 1125, 1080, 36, 5,
+ 4, "+", 2200, 1920, 91, 44, 153, 74250000,
+ "+", 2275, 0xAB, 6, 32, 1},/*RGB565*/
+{ "17 CEA 60 1280x720p @ 24 Hz ",
+ 60, 750, 720, 20, 5,
+ 5, "+", 3300, 1280, 284, 50, 2276, 59400000,
+ "+", 4266, 0xAD0, 5, 32, 1},/*RGB565*/
+{ "18 CEA 61 1280x720p @ 25 Hz ",
+ 61, 750, 720, 20, 5,
+ 5, "+", 3960, 1280, 228, 39, 2503, 74250000,
+ "+", 4096, 0x500, 5, 32, 1},/*RGB565*/
+{ "19 CEA 62 1280x720p @ 30 Hz ",
+ 62, 750, 720, 20, 5,
+ 5, "+", 3300, 1280, 228, 39, 1820, 74250000,
+ "+", 3413, 0x770, 5, 32, 1},/*RGB565*/
+{ "20 VESA 9 800x600 @ 60 Hz ",
+ 109, 628, 600, 28, 4,
+ 0, "+", 1056, 800, 40, 128, 10, 40000000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "21 VESA 14 848x480 @ 60 Hz ",
+ 114, 517, 480, 20, 5,
+ 0, "+", 1088, 848, 24, 80, 10, 33750000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "22 VESA 16 1024x768 @ 60 Hz ",
+ 116, 806, 768, 38, 6,
+ 0, "-", 1344, 1024, 24, 135, 10, 65000000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "23 VESA 22 1280x768 @ 60 Hz ",
+ 122, 790, 768, 34, 4,
+ 0, "+", 1440, 1280, 48, 160, 10, 68250000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "24 VESA 23 1280x768 @ 60 Hz ",
+ 123, 798, 768, 30, 7,
+ 0, "+", 1664, 1280, 64, 128, 10, 79500000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "25 VESA 27 1280x800 @ 60 Hz ",
+ 127, 823, 800, 23, 6,
+ 0, "+", 1440, 1280, 48, 32, 10, 71000000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "26 VESA 28 1280x800 @ 60 Hz ",
+ 128, 831, 800, 31, 6,
+ 0, "+", 1680, 1280, 72, 128, 10, 83500000,
+ "-", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "27 VESA 39 1360x768 @ 60 Hz ",
+ 139, 795, 768, 22, 5,
+ 0, "-", 1792, 1360, 48, 32, 10, 85500000,
+ "+", 0, 0, 0, 0, 0},/*Settings to be define*/
+{ "28 VESA 81 1366x768 @ 60 Hz ",
+ 181, 798, 768, 30, 5,
+ 0, "+", 1792, 1366, 72, 136, 10, 85750000,
+ "-", 0, 0, 0, 0, 0} /*Settings to be define*/
+};
+
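+/*
+ * Color-space conversion command tables: c0..c8 form a 3x3 matrix,
+ * aoffset/boffset/coffset are per-channel offsets, and lmax/lmin/cmax/cmin
+ * clamp the luma/chroma output range (full range 0x00-0xff, or
+ * 0x10-0xeb/0xf0 for video-range YUV).
+ */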
+const struct color_conversion_cmd col_trans_identity = {
+ .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000,
+ .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000,
+ .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100,
+ .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+const struct color_conversion_cmd col_trans_identity_clamp_yuv = {
+ .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000,
+ .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000,
+ .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100,
+ .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000,
+ .lmax = 0xeb,
+ .lmin = 0x10,
+ .cmax = 0xf0,
+ .cmin = 0x10,
+};
+
+const struct color_conversion_cmd col_trans_yuv_to_rgb_v1 = {
+ .c0 = 0x0087, .c1 = 0x0000, .c2 = 0x00ba,
+ .c3 = 0x0087, .c4 = 0xffd3, .c5 = 0xffa1,
+ .c6 = 0x0087, .c7 = 0x00eb, .c8 = 0x0000,
+ .aoffset = 0xffab, .boffset = 0x004e, .coffset = 0xff92,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+const struct color_conversion_cmd col_trans_yuv_to_rgb_v2 = {
+ .c0 = 0x0198, .c1 = 0x012a, .c2 = 0x0000,
+ .c3 = 0xff30, .c4 = 0x012a, .c5 = 0xff9c,
+ .c6 = 0x0000, .c7 = 0x012a, .c8 = 0x0204,
+ .aoffset = 0xff21, .boffset = 0x0088, .coffset = 0xfeeb,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+const struct color_conversion_cmd col_trans_yuv_to_denc = {
+ .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000,
+ .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000,
+ .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100,
+ .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000,
+ .lmax = 0xeb,
+ .lmin = 0x10,
+ .cmax = 0xf0,
+ .cmin = 0x10,
+};
+
+const struct color_conversion_cmd col_trans_rgb_to_denc = {
+ .c0 = 0x0070, .c1 = 0xffb6, .c2 = 0xffda,
+ .c3 = 0x0042, .c4 = 0x0081, .c5 = 0x0019,
+ .c6 = 0xffee, .c7 = 0xffa2, .c8 = 0x0070,
+ .aoffset = 0x007f, .boffset = 0x0010, .coffset = 0x007f,
+ .lmax = 0xff,
+ .lmin = 0x00,
+ .cmax = 0xff,
+ .cmin = 0x00,
+};
+
+static const struct i2c_device_id av8100_id[] = {
+ { "av8100", 0 },
+ { }
+};
+
+static struct av8100_device *devnr_to_adev(int devnr)
+{
+ /* Get device from list of devices */
+ struct list_head *element;
+ struct av8100_device *av8100_dev;
+ int cnt = 0;
+
+ list_for_each(element, &av8100_device_list) {
+ av8100_dev = list_entry(element, struct av8100_device, list);
+ if (cnt == devnr)
+ return av8100_dev;
+ cnt++;
+ }
+
+ return NULL;
+}
+
+static struct av8100_device *dev_to_adev(struct device *dev)
+{
+ /* Get device from list of devices */
+ struct list_head *element;
+ struct av8100_device *av8100_dev;
+
+ list_for_each(element, &av8100_device_list) {
+ av8100_dev = list_entry(element, struct av8100_device, list);
+ if (av8100_dev->dev == dev)
+ return av8100_dev;
+ }
+
+ return NULL;
+}
+
+static int adev_to_devnr(struct av8100_device *adev)
+{
+ /* Get devnr from list of devices */
+ struct list_head *element;
+ struct av8100_device *av8100_dev;
+ int cnt = 0;
+
+ list_for_each(element, &av8100_device_list) {
+ av8100_dev = list_entry(element, struct av8100_device, list);
+ if (av8100_dev == adev)
+ return cnt;
+ cnt++;
+ }
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_PM
+static int av8100_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct av8100_device *adev;
+
+ adev = dev_to_adev(dev);
+ if (!adev)
+ return -EFAULT;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ adev->params.pre_suspend_power =
+ (av8100_status_get().av8100_state > AV8100_OPMODE_SHUTDOWN);
+
+ if (adev->params.pre_suspend_power) {
+ ret = av8100_powerdown();
+ if (ret)
+ dev_err(dev, "av8100_powerdown failed\n");
+ }
+
+ return ret;
+}
+
+static int av8100_resume(struct device *dev)
+{
+ int ret;
+ u8 hpds = 0;
+ struct av8100_device *adev;
+
+ adev = dev_to_adev(dev);
+ if (!adev)
+ return -EFAULT;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (adev->params.pre_suspend_power) {
+ ret = av8100_powerup();
+ if (ret) {
+ dev_err(dev, "av8100_powerup failed\n");
+ return ret;
+ }
+
+ /* Check HDMI plug status */
+ if (av8100_reg_stby_r(NULL, NULL, &hpds, NULL, NULL)) {
+ dev_warn(dev, "av8100_reg_stby_r failed\n");
+ goto av8100_resume_end;
+ }
+
+ if (hpds)
+ set_plug_status(adev, AV8100_HDMI_PLUGIN); /* Plugged*/
+ else
+ clr_plug_status(adev,
+ AV8100_HDMI_PLUGIN); /* Unplugged*/
+
+ adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH;
+ av8100_enable_interrupt();
+ }
+
+av8100_resume_end:
+ return 0;
+}
+
+static const struct dev_pm_ops av8100_dev_pm_ops = {
+ .suspend = av8100_suspend,
+ .resume = av8100_resume,
+};
+#endif
+
+static struct i2c_driver av8100_driver = {
+ .probe = av8100_probe,
+ .remove = av8100_remove,
+ .driver = {
+ .name = "av8100",
+#ifdef CONFIG_PM
+ .pm = &av8100_dev_pm_ops,
+#endif
+ },
+ .id_table = av8100_id,
+};
+
+static void av8100_plugtimer_int(unsigned long value)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev((int)value);
+ if (!adev)
+ return;
+
+ adev->flag |= AV8100_PLUGSTARTUP_EVENT;
+ wake_up_interruptible(&adev->event);
+ del_timer(&adev->timer);
+}
+
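+/*
+ * Decode a chip interrupt: read and clear the STANDBY and GENERAL pending
+ * interrupt registers, then forward HDMI/CVBS plug changes, CEC rx/tx,
+ * CEC tx errors and HDCP state changes to the registered event callback.
+ */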
+static int av8100_int_event_handle(struct av8100_device *adev)
+{
+ u8 hpdi = 0;
+ u8 cpdi = 0;
+ u8 uovbi = 0;
+ u8 hdcpi = 0;
+ u8 ceci = 0;
+ u8 hpds = 0;
+ u8 cpds = 0;
+ u8 hdcps = 0;
+ u8 onuvb = 0;
+ u8 cectxerr = 0;
+ u8 cecrx = 0;
+ u8 cectx = 0;
+
+ /* STANDBY_PENDING_INTERRUPT reg */
+ if (av8100_reg_stby_pend_int_r(&hpdi, &cpdi, NULL, NULL)) {
+ dev_dbg(adev->dev, "av8100_reg_stby_pend_int_r failed\n");
+ goto av8100_int_event_handle_1;
+ }
+
+ /* Plug event */
+ if (hpdi | cpdi) {
+ /* Clear pending interrupts */
+ (void)av8100_reg_stby_pend_int_w(1, 1, 1, 0);
+
+ /* STANDBY reg */
+ if (av8100_reg_stby_r(NULL, NULL, &hpds, &cpds, NULL)) {
+ dev_dbg(adev->dev, "av8100_reg_stby_r failed\n");
+ goto av8100_int_event_handle_1;
+ }
+ }
+
+ if (cpdi & adev->params.cpdm) {
+ /* TVout plugin change */
+ if (cpds) {
+ dev_dbg(adev->dev, "cpds 1\n");
+ set_plug_status(adev, AV8100_CVBS_PLUGIN);
+ } else {
+ dev_dbg(adev->dev, "cpds 0\n");
+ clr_plug_status(adev, AV8100_CVBS_PLUGIN);
+ }
+ }
+
+ if (hpdi & adev->params.hpdm) {
+ /* HDMI plugin change */
+ if (hpds) {
+ /* Plugged */
+ /* Set 5V always on */
+ av8100_5V_w(adev->params.denc_off_time,
+ 0,
+ adev->params.on_time);
+ dev_dbg(adev->dev, "hpds 1\n");
+ set_plug_status(adev, AV8100_HDMI_PLUGIN);
+ } else {
+ /* Unplugged */
+ av8100_5V_w(adev->params.denc_off_time,
+ adev->params.hdmi_off_time,
+ adev->params.on_time);
+ dev_dbg(adev->dev, "hpds 0\n");
+ clr_plug_status(adev, AV8100_HDMI_PLUGIN);
+ }
+ }
+
+av8100_int_event_handle_1:
+ /* GENERAL_INTERRUPT reg */
+ if (av8100_reg_gen_int_r(NULL, NULL, NULL, &ceci,
+ &hdcpi, &uovbi, NULL)) {
+ dev_dbg(adev->dev, "av8100_reg_gen_int_r failed\n");
+ return -EINVAL;
+ }
+
+ /* CEC or HDCP event */
+ if (ceci | hdcpi | uovbi) {
+ /* Clear pending interrupts */
+ av8100_reg_gen_int_w(1, 1, 1, 1, 1, 1);
+
+ /* GENERAL_STATUS reg */
+ if (av8100_reg_gen_status_r(&cectxerr, &cecrx, &cectx, NULL,
+ &onuvb, &hdcps) != 0) {
+ dev_dbg(adev->dev, "av8100_reg_gen_status_r fail\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Underflow or overflow */
+ if (uovbi)
+ dev_dbg(adev->dev, "uovbi %d\n", onuvb);
+
+ /* CEC received */
+ if (ceci && cecrx) {
+ u8 val;
+
+ dev_dbg(adev->dev, "cecrx\n");
+
+ /* Clear cecrx in status reg*/
+ if (av8100_reg_r(AV8100_GENERAL_STATUS, &val) == 0) {
+ if (av8100_reg_w(AV8100_GENERAL_STATUS,
+ val & ~AV8100_GENERAL_STATUS_CECREC_MASK))
+ dev_info(adev->dev, "gen_stat write error\n");
+ } else {
+ dev_info(adev->dev, "gen_stat read error\n");
+ }
+
+ /* Report CEC event */
+ cec_rx(adev);
+ }
+
+ /* CEC tx error */
+ if (ceci && cectx && cectxerr) {
+ dev_dbg(adev->dev, "cectxerr\n");
+ /* Report CEC tx error event */
+ cec_txerr(adev);
+ } else if (ceci && cectx) {
+ dev_dbg(adev->dev, "cectx\n");
+ /* Report CEC tx event */
+ cec_tx(adev);
+ }
+
+ /* HDCP event */
+ if (hdcpi) {
+ dev_dbg(adev->dev, "hdcpch:%0x\n", hdcps);
+ /* Report HDCP status change event */
+ hdcp_changed(adev);
+ }
+
+ return 0;
+}
+
+static int av8100_plugstartup_event_handle(struct av8100_device *adev)
+{
+ u8 hpds = 0;
+ u8 cpds = 0;
+
+ switch (adev->params.plug_state) {
+ case AV8100_UNPLUGGED:
+ case AV8100_PLUGGED:
+ default:
+ break;
+
+ case AV8100_PLUGGED_STARTUP:
+ /* Unmask interrupt */
+ adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH;
+ if (av8100_reg_stby_int_mask_w(adev->params.hpdm,
+ adev->params.cpdm,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW)) {
+ dev_dbg(adev->dev,
+ "av8100_reg_stby_int_mask_w fail\n");
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ /* Get actual plug status */
+ if (av8100_reg_stby_r(NULL, NULL, &hpds, &cpds, NULL))
+ dev_dbg(adev->dev, "av8100_reg_stby_r fail\n");
+
+ /* Set plugstate */
+ if (hpds) {
+ adev->params.plug_state = AV8100_PLUGGED;
+ dev_dbg(adev->dev, "plug_state:2\n");
+ } else {
+ adev->params.plug_state = AV8100_UNPLUGGED;
+ dev_dbg(adev->dev, "plug_state:0\n");
+
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(
+ AV8100_HDMI_EVENT_HDMI_PLUGOUT);
+ }
+ break;
+ }
+
+ return 0;
+}
+
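+/*
+ * Worker thread: sleeps until the irq handler or the plug-startup timer
+ * sets a flag in adev->flag, then handles the event outside interrupt
+ * context.
+ */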
+static int av8100_thread(void *p)
+{
+ u8 flags;
+ struct av8100_device *adev = p;
+
+ while (1) {
+ wait_event_interruptible(adev->event, (adev->flag != 0));
+ flags = adev->flag;
+ adev->flag = 0;
+
+ if (adev->status.av8100_state < AV8100_OPMODE_STANDBY)
+ continue;
+
+ if (flags & AV8100_INT_EVENT)
+ (void)av8100_int_event_handle(adev);
+
+ if (flags & AV8100_PLUGSTARTUP_EVENT)
+ (void)av8100_plugstartup_event_handle(adev);
+ }
+
+ return 0;
+}
+
+static irqreturn_t av8100_intr_handler(int irq, void *p)
+{
+ struct av8100_device *adev;
+
+ adev = (struct av8100_device *) p;
+ adev->flag |= AV8100_INT_EVENT;
+ wake_up_interruptible(&adev->event);
+
+ return IRQ_HANDLED;
+}
+
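+/*
+ * Map the output video format to the TE (tearing-effect) line number used
+ * when configuring the DSI video input format.
+ */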
+static u16 av8100_get_te_line_nb(
+ enum av8100_output_CEA_VESA output_video_format)
+{
+ u16 retval;
+
+ switch (output_video_format) {
+ case AV8100_CEA1_640X480P_59_94HZ:
+ case AV8100_CEA2_3_720X480P_59_94HZ:
+ case AV8100_VESA16_1024X768P_60HZ:
+ retval = AV8100_TE_LINE_NB_30;
+ break;
+
+ case AV8100_CEA4_1280X720P_60HZ:
+ case AV8100_CEA60_1280X720P_24HZ:
+ case AV8100_CEA61_1280X720P_25HZ:
+ case AV8100_CEA62_1280X720P_30HZ:
+ retval = AV8100_TE_LINE_NB_21;
+ break;
+
+ case AV8100_CEA5_1920X1080I_60HZ:
+ case AV8100_CEA6_7_NTSC_60HZ:
+ case AV8100_CEA20_1920X1080I_50HZ:
+ case AV8100_CEA21_22_576I_PAL_50HZ:
+ case AV8100_VESA27_1280X800P_59_91HZ:
+ retval = AV8100_TE_LINE_NB_18;
+ break;
+
+ case AV8100_CEA14_15_480p_60HZ:
+ retval = AV8100_TE_LINE_NB_32;
+ break;
+
+ case AV8100_CEA17_18_720X576P_50HZ:
+ case AV8100_CEA29_30_576P_50HZ:
+ retval = AV8100_TE_LINE_NB_40;
+ break;
+
+ case AV8100_CEA19_1280X720P_50HZ:
+ case AV8100_VESA39_1360X768P_60_02HZ:
+ retval = AV8100_TE_LINE_NB_22;
+ break;
+
+ case AV8100_CEA32_1920X1080P_24HZ:
+ case AV8100_CEA33_1920X1080P_25HZ:
+ case AV8100_CEA34_1920X1080P_30HZ:
+ retval = AV8100_TE_LINE_NB_38;
+ break;
+
+ case AV8100_VESA9_800X600P_60_32HZ:
+ retval = AV8100_TE_LINE_NB_24;
+ break;
+
+ case AV8100_VESA14_848X480P_60HZ:
+ retval = AV8100_TE_LINE_NB_29;
+ break;
+
+ case AV8100_VESA22_1280X768P_59_99HZ:
+ retval = AV8100_TE_LINE_NB_17;
+ break;
+
+ case AV8100_VESA23_1280X768P_59_87HZ:
+ case AV8100_VESA81_1366X768P_59_79HZ:
+ retval = AV8100_TE_LINE_NB_25;
+ break;
+
+ case AV8100_VESA28_1280X800P_59_81HZ:
+ retval = AV8100_TE_LINE_NB_26;
+ break;
+
+ case AV8100_CEA16_1920X1080P_60HZ:
+ case AV8100_CEA31_1920x1080P_50Hz:
+ default:
+ /* TODO */
+ retval = AV8100_TE_LINE_NB_38;
+ break;
+ }
+
+ return retval;
+}
+
+static u16 av8100_get_ui_x4(
+ enum av8100_output_CEA_VESA output_video_format)
+{
+ return AV8100_UI_X4_DEFAULT;
+}
+
+static int av8100_config_video_output_dep(
+ enum av8100_output_CEA_VESA output_format)
+{
+ int retval;
+ union av8100_configuration config;
+
+ /* video input */
+ config.video_input_format.dsi_input_mode =
+ AV8100_HDMI_DSI_COMMAND_MODE;
+ config.video_input_format.input_pixel_format = AV8100_INPUT_PIX_RGB565;
+ config.video_input_format.total_horizontal_pixel =
+ av8100_all_cea[output_format].htotale;
+ config.video_input_format.total_horizontal_active_pixel =
+ av8100_all_cea[output_format].hactive;
+ config.video_input_format.total_vertical_lines =
+ av8100_all_cea[output_format].vtotale;
+ config.video_input_format.total_vertical_active_lines =
+ av8100_all_cea[output_format].vactive;
+
+ switch (output_format) {
+ case AV8100_CEA5_1920X1080I_60HZ:
+ case AV8100_CEA20_1920X1080I_50HZ:
+ case AV8100_CEA21_22_576I_PAL_50HZ:
+ case AV8100_CEA6_7_NTSC_60HZ:
+ config.video_input_format.video_mode =
+ AV8100_VIDEO_INTERLACE;
+ break;
+
+ default:
+ config.video_input_format.video_mode =
+ AV8100_VIDEO_PROGRESSIVE;
+ break;
+ }
+
+ config.video_input_format.nb_data_lane =
+ AV8100_DATA_LANES_USED_2;
+ config.video_input_format.nb_virtual_ch_command_mode = 0;
+ config.video_input_format.nb_virtual_ch_video_mode = 0;
+ config.video_input_format.ui_x4 = av8100_get_ui_x4(output_format);
+ config.video_input_format.TE_line_nb = av8100_get_te_line_nb(
+ output_format);
+ config.video_input_format.TE_config = AV8100_TE_OFF;
+ config.video_input_format.master_clock_freq = 0;
+
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_VIDEO_INPUT_FORMAT, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* DENC */
+ switch (output_format) {
+ case AV8100_CEA21_22_576I_PAL_50HZ:
+ config.denc_format.cvbs_video_format = AV8100_CVBS_625;
+ config.denc_format.standard_selection = AV8100_PAL_BDGHI;
+ break;
+
+ case AV8100_CEA6_7_NTSC_60HZ:
+ config.denc_format.cvbs_video_format = AV8100_CVBS_525;
+ config.denc_format.standard_selection = AV8100_NTSC_M;
+ break;
+
+ default:
+ /* Not supported */
+ break;
+ }
+
+ return 0;
+}
+
+static int av8100_config_init(struct av8100_device *adev)
+{
+ int retval;
+ union av8100_configuration config;
+
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ memset(&config, 0, sizeof(union av8100_configuration));
+ memset(&adev->config, 0, sizeof(struct av8100_config));
+
+ /* Color conversion */
+ config.color_transform = AV8100_COLOR_TRANSFORM_INDENTITY;
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_COLORSPACECONVERSION, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* DENC */
+ config.denc_format.cvbs_video_format = AV8100_CVBS_625;
+ config.denc_format.standard_selection = AV8100_PAL_BDGHI;
+ config.denc_format.enable = 0;
+ config.denc_format.macrovision_enable = 0;
+ config.denc_format.internal_generator = 0;
+ retval = av8100_conf_prep(AV8100_COMMAND_DENC, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* Video output */
+ config.video_output_format.video_output_cea_vesa =
+ AV8100_CEA4_1280X720P_60HZ;
+
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_VIDEO_OUTPUT_FORMAT, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* Video input */
+ av8100_config_video_output_dep(
+ config.video_output_format.video_output_cea_vesa);
+
+ /* Pattern generator */
+ config.pattern_generator_format.pattern_audio_mode =
+ AV8100_PATTERN_AUDIO_OFF;
+ config.pattern_generator_format.pattern_type =
+ AV8100_PATTERN_GENERATOR;
+ config.pattern_generator_format.pattern_video_format =
+ AV8100_PATTERN_720P;
+ retval = av8100_conf_prep(AV8100_COMMAND_PATTERNGENERATOR,
+ &config);
+ if (retval)
+ return -EFAULT;
+
+ /* Audio input */
+ config.audio_input_format.audio_input_if_format =
+ AV8100_AUDIO_I2SDELAYED_MODE;
+ config.audio_input_format.i2s_input_nb = 1;
+ config.audio_input_format.sample_audio_freq = AV8100_AUDIO_FREQ_48KHZ;
+ config.audio_input_format.audio_word_lg = AV8100_AUDIO_16BITS;
+ config.audio_input_format.audio_format = AV8100_AUDIO_LPCM_MODE;
+ config.audio_input_format.audio_if_mode = AV8100_AUDIO_MASTER;
+ config.audio_input_format.audio_mute = AV8100_AUDIO_MUTE_DISABLE;
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_AUDIO_INPUT_FORMAT, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* HDMI mode */
+ config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ config.hdmi_format.hdmi_format = AV8100_HDMI;
+ config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ retval = av8100_conf_prep(AV8100_COMMAND_HDMI, &config);
+ if (retval)
+ return -EFAULT;
+
+ /* EDID section readback */
+ config.edid_section_readback_format.address = 0xA0;
+ config.edid_section_readback_format.block_number = 0;
+ retval = av8100_conf_prep(
+ AV8100_COMMAND_EDID_SECTION_READBACK, &config);
+ if (retval)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int av8100_params_init(struct av8100_device *adev)
+{
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ memset(&adev->params, 0, sizeof(struct av8100_params));
+
+ adev->params.denc_off_time = AV8100_DENC_OFF_TIME;
+ adev->params.hdmi_off_time = AV8100_HDMI_OFF_TIME;
+ adev->params.on_time = AV8100_ON_TIME;
+
+ adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH;
+ adev->params.cpdm = AV8100_STANDBY_INTERRUPT_MASK_CPDM_HIGH;
+ adev->params.hdcpm = AV8100_GENERAL_INTERRUPT_MASK_HDCPM_HIGH;
+ adev->params.cecm = AV8100_GENERAL_INTERRUPT_MASK_CECM_HIGH;
+ adev->params.uovbm = AV8100_GENERAL_INTERRUPT_MASK_UOVBM_HIGH;
+
+ return 0;
+}
+
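+/*
+ * HDMI plug handling is a small state machine: UNPLUGGED ->
+ * PLUGGED_STARTUP (interrupts masked and the startup timer armed in
+ * set_plug_status) -> PLUGGED. clr_plug_status reports plugout and
+ * returns to UNPLUGGED.
+ */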
+static void clr_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status)
+{
+ adev->status.av8100_plugin_status &= ~status;
+
+ switch (status) {
+ case AV8100_HDMI_PLUGIN:
+ switch (adev->params.plug_state) {
+ case AV8100_UNPLUGGED:
+ case AV8100_PLUGGED_STARTUP:
+ default:
+ break;
+
+ case AV8100_PLUGGED:
+ adev->params.plug_state =
+ AV8100_UNPLUGGED;
+ dev_dbg(adev->dev, "plug_state:0\n");
+
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(
+ AV8100_HDMI_EVENT_HDMI_PLUGOUT);
+ break;
+ }
+ break;
+
+ case AV8100_CVBS_PLUGIN:
+ /* TODO */
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void set_plug_status(struct av8100_device *adev,
+ enum av8100_plugin_status status)
+{
+ adev->status.av8100_plugin_status |= status;
+
+ switch (status) {
+ case AV8100_HDMI_PLUGIN:
+ switch (adev->params.plug_state) {
+ case AV8100_UNPLUGGED:
+ adev->params.plug_state =
+ AV8100_PLUGGED_STARTUP;
+
+ dev_dbg(adev->dev, "plug_state:1\n");
+
+ /*
+ * Mask interrupts to avoid plug detect during
+ * startup.
+ */
+ adev->params.hpdm =
+ AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW;
+ if (av8100_reg_stby_int_mask_w(
+ adev->params.hpdm,
+ adev->params.cpdm,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW)) {
+ dev_dbg(adev->dev,
+ "av8100_reg_stby_int_mask_w fail\n");
+ }
+
+ /* Set plug startup timer */
+ init_timer(&adev->timer);
+ adev->timer.expires = jiffies +
+ AV8100_PLUGSTARTUP_TIME;
+ adev->timer.function =
+ av8100_plugtimer_int;
+ adev->timer.data = adev_to_devnr(adev);
+ mod_timer(&adev->timer, adev->timer.expires);
+
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(
+ AV8100_HDMI_EVENT_HDMI_PLUGIN);
+ break;
+
+ case AV8100_PLUGGED_STARTUP:
+ case AV8100_PLUGGED:
+ default:
+ break;
+ }
+ break;
+
+ case AV8100_CVBS_PLUGIN:
+ /* TODO */
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void cec_rx(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CEC);
+}
+
+static void cec_tx(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CECTX);
+}
+
+static void cec_txerr(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CECTXERR);
+}
+
+static void hdcp_changed(struct av8100_device *adev)
+{
+ if (adev->params.hdmi_ev_cb)
+ adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_HDCP);
+}
+
+static void av8100_set_state(struct av8100_device *adev,
+ enum av8100_operating_mode state)
+{
+ adev->status.av8100_state = state;
+
+ if (state <= AV8100_OPMODE_STANDBY) {
+ clr_plug_status(adev, AV8100_HDMI_PLUGIN);
+ clr_plug_status(adev, AV8100_CVBS_PLUGIN);
+ adev->status.hdmi_on = false;
+ }
+}
+
+/**
+ * write_single_byte() - Write a single byte to av8100
+ * through i2c interface.
+ * @client: i2c client structure
+ * @reg: register offset
+ * @data: data byte to be written
+ *
+ * This function uses the smbus byte write API to write a single byte to
+ * av8100.
+ **/
+static int write_single_byte(struct i2c_client *client, u8 reg,
+ u8 data)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ ret = i2c_smbus_write_byte_data(client, reg, data);
+ if (ret < 0)
+ dev_dbg(dev, "i2c smbus write byte failed\n");
+
+ return ret;
+}
+
+/**
+ * read_single_byte() - read single byte from av8100
+ * through i2c interface
+ * @client: i2c client structure
+ * @reg: register offset
+ * @val: register value
+ *
+ * This function uses the smbus byte read API to read a single byte from
+ * the reg offset.
+ **/
+static int read_single_byte(struct i2c_client *client, u8 reg, u8 *val)
+{
+ int value;
+ struct device *dev = &client->dev;
+
+ value = i2c_smbus_read_byte_data(client, reg);
+ if (value < 0) {
+ dev_dbg(dev, "i2c smbus read byte failed,read data = %x "
+ "from offset:%x\n" , value, reg);
+ return -EFAULT;
+ }
+
+ *val = (u8) value;
+ return 0;
+}
+
+/**
+ * write_multi_byte() - Write multiple bytes to av8100 through
+ * i2c interface.
+ * @client: i2c client structure
+ * @reg: register offset
+ * @buf: buffer to be written
+ * @nbytes: number of bytes to be written
+ *
+ * This function uses the smbus block write API to write nbytes bytes to
+ * the av8100.
+ **/
+static int write_multi_byte(struct i2c_client *client, u8 reg,
+ u8 *buf, u8 nbytes)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ ret = i2c_smbus_write_i2c_block_data(client, reg, nbytes, buf);
+ if (ret < 0)
+ dev_dbg(dev, "i2c smbus write multi byte error\n");
+
+ return ret;
+}
+
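+/*
+ * The configuration_*_get helpers below serialize a stored configuration
+ * into the command buffer written to the chip; multi-byte fields are split
+ * big-endian style with the REG_16_8 and REG_32_8 MSB/LSB macros.
+ */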
+static int configuration_video_input_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_video_input_cmd.dsi_input_mode;
+ buffer[1] = adev->config.hdmi_video_input_cmd.input_pixel_format;
+ buffer[2] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_pixel);
+ buffer[3] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_pixel);
+ buffer[4] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_active_pixel);
+ buffer[5] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_horizontal_active_pixel);
+ buffer[6] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_lines);
+ buffer[7] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_lines);
+ buffer[8] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_active_lines);
+ buffer[9] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ total_vertical_active_lines);
+ buffer[10] = adev->config.hdmi_video_input_cmd.video_mode;
+ buffer[11] = adev->config.hdmi_video_input_cmd.nb_data_lane;
+ buffer[12] = adev->config.hdmi_video_input_cmd.
+ nb_virtual_ch_command_mode;
+ buffer[13] = adev->config.hdmi_video_input_cmd.
+ nb_virtual_ch_video_mode;
+ buffer[14] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd.
+ TE_line_nb);
+ buffer[15] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd.
+ TE_line_nb);
+ buffer[16] = adev->config.hdmi_video_input_cmd.TE_config;
+ buffer[17] = REG_32_8_MSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[18] = REG_32_8_MMSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[19] = REG_32_8_MLSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[20] = REG_32_8_LSB(adev->config.hdmi_video_input_cmd.
+ master_clock_freq);
+ buffer[21] = adev->config.hdmi_video_input_cmd.ui_x4;
+
+ *length = AV8100_COMMAND_VIDEO_INPUT_FORMAT_SIZE - 1;
+ return 0;
+}
+
+static int configuration_audio_input_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_audio_input_cmd.audio_input_if_format;
+ buffer[1] = adev->config.hdmi_audio_input_cmd.i2s_input_nb;
+ buffer[2] = adev->config.hdmi_audio_input_cmd.sample_audio_freq;
+ buffer[3] = adev->config.hdmi_audio_input_cmd.audio_word_lg;
+ buffer[4] = adev->config.hdmi_audio_input_cmd.audio_format;
+ buffer[5] = adev->config.hdmi_audio_input_cmd.audio_if_mode;
+ buffer[6] = adev->config.hdmi_audio_input_cmd.audio_mute;
+
+ *length = AV8100_COMMAND_AUDIO_INPUT_FORMAT_SIZE - 1;
+ return 0;
+}
+
+static int configuration_video_output_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_video_output_cmd.
+ video_output_cea_vesa;
+
+ if (buffer[0] == AV8100_CUSTOM) {
+ buffer[1] = adev->config.hdmi_video_output_cmd.
+ vsync_polarity;
+ buffer[2] = adev->config.hdmi_video_output_cmd.
+ hsync_polarity;
+ buffer[3] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_pixel);
+ buffer[4] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_pixel);
+ buffer[5] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_active_pixel);
+ buffer[6] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.total_horizontal_active_pixel);
+ buffer[7] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.total_vertical_in_half_lines);
+ buffer[8] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.total_vertical_in_half_lines);
+ buffer[9] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.
+ total_vertical_active_in_half_lines);
+ buffer[10] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.
+ total_vertical_active_in_half_lines);
+ buffer[11] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.hsync_start_in_pixel);
+ buffer[12] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.hsync_start_in_pixel);
+ buffer[13] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.hsync_length_in_pixel);
+ buffer[14] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.hsync_length_in_pixel);
+ buffer[15] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.vsync_start_in_half_line);
+ buffer[16] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.vsync_start_in_half_line);
+ buffer[17] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.vsync_length_in_half_line);
+ buffer[18] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.vsync_length_in_half_line);
+ buffer[19] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.hor_video_start_pixel);
+ buffer[20] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.hor_video_start_pixel);
+ buffer[21] = REG_16_8_MSB(adev->config.
+ hdmi_video_output_cmd.vert_video_start_pixel);
+ buffer[22] = REG_16_8_LSB(adev->config.
+ hdmi_video_output_cmd.vert_video_start_pixel);
+ buffer[23] = adev->config.hdmi_video_output_cmd.video_type;
+ buffer[24] = adev->config.hdmi_video_output_cmd.pixel_repeat;
+ buffer[25] = REG_32_8_MSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+ buffer[26] = REG_32_8_MMSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+ buffer[27] = REG_32_8_MLSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+ buffer[28] = REG_32_8_LSB(adev->config.
+ hdmi_video_output_cmd.pixel_clock_freq_Hz);
+
+ *length = AV8100_COMMAND_VIDEO_OUTPUT_FORMAT_SIZE - 1;
+ } else {
+ *length = 1;
+ }
+
+ return 0;
+}
+
+static int configuration_video_scaling_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_start_in_pixel);
+ buffer[1] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ h_start_in_pixel);
+ buffer[2] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_in_pixel);
+ buffer[3] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_in_pixel);
+ buffer[4] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_in_line);
+ buffer[5] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_in_line);
+ buffer[6] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_in_line);
+ buffer[7] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_in_line);
+ buffer[8] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_start_out_pixel);
+ buffer[9] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ h_start_out_pixel);
+ buffer[10] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_out_pixel);
+ buffer[11] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ h_stop_out_pixel);
+ buffer[12] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_out_line);
+ buffer[13] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_start_out_line);
+ buffer[14] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_out_line);
+ buffer[15] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd.
+ v_stop_out_line);
+
+ *length = AV8100_COMMAND_VIDEO_SCALING_FORMAT_SIZE - 1;
+ return 0;
+}
+
+static int configuration_colorspace_conversion_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ const struct color_conversion_cmd *hdmi_color_space_conversion_cmd;
+
+ hdmi_color_space_conversion_cmd =
+ get_color_transform_cmd(adev, adev->config.color_transform);
+
+ buffer[0] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c0);
+ buffer[1] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c0);
+ buffer[2] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c1);
+ buffer[3] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c1);
+ buffer[4] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c2);
+ buffer[5] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c2);
+ buffer[6] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c3);
+ buffer[7] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c3);
+ buffer[8] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c4);
+ buffer[9] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c4);
+ buffer[10] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c5);
+ buffer[11] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c5);
+ buffer[12] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c6);
+ buffer[13] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c6);
+ buffer[14] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c7);
+ buffer[15] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c7);
+ buffer[16] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c8);
+ buffer[17] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c8);
+ buffer[18] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->aoffset);
+ buffer[19] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->aoffset);
+ buffer[20] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->boffset);
+ buffer[21] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->boffset);
+ buffer[22] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->coffset);
+ buffer[23] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->coffset);
+ buffer[24] = hdmi_color_space_conversion_cmd->lmax;
+ buffer[25] = hdmi_color_space_conversion_cmd->lmin;
+ buffer[26] = hdmi_color_space_conversion_cmd->cmax;
+ buffer[27] = hdmi_color_space_conversion_cmd->cmin;
+
+ *length = AV8100_COMMAND_COLORSPACECONVERSION_SIZE - 1;
+ return 0;
+}
+
+static int configuration_cec_message_write_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_cec_message_write_cmd.buffer_length;
+ memcpy(&buffer[1], adev->config.hdmi_cec_message_write_cmd.buffer,
+ adev->config.hdmi_cec_message_write_cmd.buffer_length);
+
+ *length = adev->config.hdmi_cec_message_write_cmd.buffer_length + 1;
+
+ return 0;
+}
+
+static int configuration_cec_message_read_get(char *buffer,
+ unsigned int *length)
+{
+ /* No buffer data */
+ *length = AV8100_COMMAND_CEC_MESSAGE_READ_BACK_SIZE - 1;
+ return 0;
+}
+
+static int configuration_denc_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_denc_cmd.cvbs_video_format;
+ buffer[1] = adev->config.hdmi_denc_cmd.standard_selection;
+ buffer[2] = adev->config.hdmi_denc_cmd.enable;
+ buffer[3] = adev->config.hdmi_denc_cmd.macrovision_enable;
+ buffer[4] = adev->config.hdmi_denc_cmd.internal_generator;
+
+ *length = AV8100_COMMAND_DENC_SIZE - 1;
+ return 0;
+}
+
+static int configuration_hdmi_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_cmd.hdmi_mode;
+ buffer[1] = adev->config.hdmi_cmd.hdmi_format;
+ buffer[2] = adev->config.hdmi_cmd.dvi_format;
+
+ *length = AV8100_COMMAND_HDMI_SIZE - 1;
+ return 0;
+}
+
+static int configuration_hdcp_sendkey_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_hdcp_send_key_cmd.key_number;
+ memcpy(&buffer[1], adev->config.hdmi_hdcp_send_key_cmd.data,
+ adev->config.hdmi_hdcp_send_key_cmd.data_len);
+
+ *length = adev->config.hdmi_hdcp_send_key_cmd.data_len + 1;
+ return 0;
+}
+
+static int configuration_hdcp_management_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_hdcp_management_format_cmd.req_type;
+ buffer[1] = adev->config.hdmi_hdcp_management_format_cmd.encr_use;
+
+ *length = AV8100_COMMAND_HDCP_MANAGEMENT_SIZE - 1;
+ return 0;
+}
+
+static int configuration_infoframe_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_infoframes_cmd.type;
+ buffer[1] = adev->config.hdmi_infoframes_cmd.version;
+ buffer[2] = adev->config.hdmi_infoframes_cmd.length;
+ buffer[3] = adev->config.hdmi_infoframes_cmd.crc;
+ memcpy(&buffer[4], adev->config.hdmi_infoframes_cmd.data,
+ HDMI_INFOFRAME_DATA_SIZE);
+
+ *length = adev->config.hdmi_infoframes_cmd.length + 4;
+ return 0;
+}
+
+static int av8100_edid_section_readback_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_edid_section_readback_cmd.address;
+ buffer[1] = adev->config.hdmi_edid_section_readback_cmd.
+ block_number;
+
+ *length = AV8100_COMMAND_EDID_SECTION_READBACK_SIZE - 1;
+ return 0;
+}
+
+static int configuration_pattern_generator_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_pattern_generator_cmd.pattern_type;
+ buffer[1] = adev->config.hdmi_pattern_generator_cmd.
+ pattern_video_format;
+ buffer[2] = adev->config.hdmi_pattern_generator_cmd.
+ pattern_audio_mode;
+
+ *length = AV8100_COMMAND_PATTERNGENERATOR_SIZE - 1;
+ return 0;
+}
+
+static int configuration_fuse_aes_key_get(struct av8100_device *adev,
+ char *buffer, unsigned int *length)
+{
+ buffer[0] = adev->config.hdmi_fuse_aes_key_cmd.fuse_operation;
+ if (adev->config.hdmi_fuse_aes_key_cmd.fuse_operation) {
+ /* Write key command */
+ memcpy(&buffer[1], adev->config.hdmi_fuse_aes_key_cmd.key,
+ HDMI_FUSE_AES_KEY_SIZE);
+
+ *length = AV8100_COMMAND_FUSE_AES_KEY_SIZE - 1;
+ } else {
+ /* Check key command */
+ *length = AV8100_COMMAND_FUSE_AES_CHK_SIZE - 1;
+ }
+ return 0;
+}
+
+static int get_command_return_first(struct i2c_client *i2c,
+ enum av8100_command_type command_type)
+{
+ int retval = 0;
+ u8 val;
+ struct device *dev = &i2c->dev;
+
+ retval = read_single_byte(i2c, AV8100_COMMAND_OFFSET, &val);
+ if (retval) {
+ dev_dbg(dev, "%s 1st ret failed\n", __func__);
+ return retval;
+ }
+
+ if (val != (0x80 | command_type)) {
+ dev_dbg(dev, "%s 1st ret wrong:%x\n", __func__, val);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
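+/*
+ * Read back the return data of a completed command. Most commands only
+ * return a status byte at AV8100_2ND_RET_BYTE_OFFSET; CEC message
+ * read-back, HDCP management, EDID section read-back, fuse AES key and
+ * HDCP send-key commands additionally return a payload that is copied
+ * into 'buffer'.
+ */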
+static int get_command_return_data(struct i2c_client *i2c,
+ enum av8100_command_type command_type,
+ u8 *command_buffer,
+ u8 *buffer_length,
+ u8 *buffer)
+{
+ int retval = 0;
+ u8 val;
+ int index = 0;
+ struct device *dev = &i2c->dev;
+
+ if (buffer_length)
+ *buffer_length = 0;
+
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ case AV8100_COMMAND_DENC:
+ case AV8100_COMMAND_HDMI:
+ case AV8100_COMMAND_INFOFRAMES:
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail2r;
+
+ if (val) {
+ retval = -EFAULT;
+ goto get_command_return_data_fail2v;
+ }
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ if ((buffer == NULL) || (buffer_length == NULL)) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Get the return buffer length */
+ retval = read_single_byte(i2c, AV8100_CEC_ADDR_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail;
+
+ dev_dbg(dev, "cec buflen:%d\n", val);
+ *buffer_length = val;
+
+ if (*buffer_length >
+ HDMI_CEC_READ_MAXSIZE) {
+ dev_dbg(dev, "CEC size too large %d\n",
+ *buffer_length);
+ *buffer_length = HDMI_CEC_READ_MAXSIZE;
+ }
+
+ dev_dbg(dev, "return data: ");
+
+ /* Get the return buffer */
+ for (index = 0; index < *buffer_length; ++index) {
+ retval = read_single_byte(i2c,
+ AV8100_CEC_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ }
+
+ dev_dbg(dev, "\n");
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ {
+ u8 nrdev;
+ u8 devcnt;
+ int cnt;
+
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail2r;
+
+ /* Check the second return byte */
+ if (val) {
+ retval = -EFAULT;
+ goto get_command_return_data_fail2v;
+ }
+
+ if ((buffer == NULL) || (buffer_length == NULL))
+ /* Ignore return data */
+ break;
+
+ dev_dbg(dev, "req_type:%02x ", command_buffer[0]);
+
+ /* Check if revoc list data is requested */
+ if (command_buffer[0] !=
+ HDMI_REQUEST_FOR_REVOCATION_LIST_INPUT) {
+ *buffer_length = 0;
+ break;
+ }
+
+ dev_dbg(dev, "return data: ");
+
+ /* Get the return buffer */
+ for (cnt = 0; cnt < HDMI_HDCP_MGMT_BKSV_SIZE; cnt++) {
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ index++;
+ }
+
+ /* Get Device count */
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index, &nrdev);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = nrdev;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ index++;
+
+ /* Determine number of devices */
+ nrdev &= HDMI_HDCP_MGMT_DEVICE_MASK;
+ if (nrdev > HDMI_HDCP_MGMT_MAX_DEVICES_SIZE)
+ nrdev = HDMI_HDCP_MGMT_MAX_DEVICES_SIZE;
+
+ /* Get Bksv for each connected equipment */
+ for (devcnt = 0; devcnt < nrdev; devcnt++)
+ for (cnt = 0; cnt < HDMI_HDCP_MGMT_BKSV_SIZE; cnt++) {
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index,
+ &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ",
+ *(buffer + index));
+ }
+ index++;
+ }
+
+ if (nrdev == 0)
+ goto hdcp_management_end;
+
+ /* Get SHA signature */
+ for (cnt = 0; cnt < HDMI_HDCP_MGMT_SHA_SIZE - 1; cnt++) {
+ retval = read_single_byte(i2c,
+ AV8100_HDCP_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ index++;
+ }
+
+hdcp_management_end:
+ *buffer_length = index;
+
+ dev_dbg(dev, "\n");
+ }
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ if ((buffer == NULL) || (buffer_length == NULL)) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Return buffer length is fixed */
+ *buffer_length = HDMI_EDIDREAD_SIZE;
+
+ dev_dbg(dev, "return data: ");
+
+ /* Get the return buffer */
+ for (index = 0; index < *buffer_length; ++index) {
+ retval = read_single_byte(i2c,
+ AV8100_EDID_RET_BUF_OFFSET + index, &val);
+ if (retval) {
+ *buffer_length = 0;
+ goto get_command_return_data_fail;
+ } else {
+ *(buffer + index) = val;
+ dev_dbg(dev, "%02x ", *(buffer + index));
+ }
+ }
+
+ dev_dbg(dev, "\n");
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ if ((buffer == NULL) || (buffer_length == NULL)) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+
+ if (retval)
+ goto get_command_return_data_fail2r;
+
+ /* Check the second return byte */
+ if (val) {
+ retval = -EFAULT;
+ goto get_command_return_data_fail2v;
+ }
+
+ /* Return buffer length is fixed */
+ *buffer_length = HDMI_FUSE_AES_KEY_RET_SIZE;
+
+ /* Get CRC */
+ retval = read_single_byte(i2c,
+ AV8100_FUSE_CRC_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail;
+
+ *buffer = val;
+ dev_dbg(dev, "CRC:%02x ", val);
+
+ /* Get programmed status */
+ retval = read_single_byte(i2c,
+ AV8100_FUSE_PRGD_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail;
+
+ *(buffer + 1) = val;
+
+ dev_dbg(dev, "programmed:%02x ", val);
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ if ((command_buffer[0] == HDMI_LOADAES_END_BLK_NR) &&
+ ((buffer == NULL) || (buffer_length == NULL))) {
+ retval = -EINVAL;
+ goto get_command_return_data_fail;
+ }
+
+ /* Get the second return byte */
+ retval = read_single_byte(i2c,
+ AV8100_2ND_RET_BYTE_OFFSET, &val);
+ if (retval)
+ goto get_command_return_data_fail2r;
+
+ if (val) {
+ retval = -EFAULT;
+ goto get_command_return_data_fail2v;
+ }
+
+ if (command_buffer[0] == HDMI_LOADAES_END_BLK_NR) {
+ /* Return CRC32 if last AES block */
+ int cnt;
+
+ dev_dbg(dev, "CRC32:");
+ for (cnt = 0; cnt < HDMI_CRC32_SIZE; cnt++) {
+ if (read_single_byte(i2c,
+ AV8100_CRC32_OFFSET + cnt, &val))
+ goto get_command_return_data_fail;
+ *(buffer + cnt) = val;
+ dev_dbg(dev, "%02x", val);
+ }
+
+ *buffer_length = HDMI_CRC32_SIZE;
+ }
+ break;
+
+ default:
+ retval = -EFAULT;
+ break;
+ }
+
+ return retval;
+get_command_return_data_fail2r:
+ dev_dbg(dev, "%s Reading 2nd return byte failed\n", __func__);
+ return retval;
+get_command_return_data_fail2v:
+ dev_dbg(dev, "%s 2nd return byte is wrong:%x\n", __func__, val);
+ return retval;
+get_command_return_data_fail:
+ dev_dbg(dev, "%s FAIL\n", __func__);
+ return retval;
+}
+
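+/*
+ * First power-up stage: enable the power regulator, release the reset
+ * gpio and, if pdata->alt_powerupseq is set, run the alternative power-up
+ * register sequence, then request the interrupt line. av8100_powerup2()
+ * then programs the 5V plug-detect timing and enters SCAN mode.
+ */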
+static int av8100_powerup1(struct av8100_device *adev)
+{
+ int retval;
+ struct av8100_platform_data *pdata = adev->dev->platform_data;
+
+ /* Regulator enable */
+ if ((adev->params.regulator_pwr) &&
+ (adev->params.regulator_requested == false)) {
+ retval = regulator_enable(adev->params.regulator_pwr);
+ if (retval < 0) {
+ dev_warn(adev->dev, "%s: regulator_enable failed\n",
+ __func__);
+ return retval;
+ }
+ dev_dbg(adev->dev, "regulator_enable ok\n");
+ adev->params.regulator_requested = true;
+ }
+
+ /* Reset av8100 */
+ gpio_set_value_cansleep(pdata->reset, 1);
+
+ /* Need to wait before proceeding */
+ mdelay(AV8100_WAITTIME_1MS);
+
+ av8100_set_state(adev, AV8100_OPMODE_STANDBY);
+
+ if (pdata->alt_powerupseq) {
+ dev_dbg(adev->dev, "powerup seq alt\n");
+ retval = av8100_5V_w(0, 0, AV8100_ON_TIME);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 1\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ udelay(AV8100_WATTIME_100US);
+
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 2\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ udelay(AV8100_WATTIME_100US);
+
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_HIGH, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 3\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_LOW, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 4\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW);
+ if (retval) {
+ dev_err(adev->dev, "%s reg_wr err 5\n", __func__);
+ goto av8100_powerup1_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+ }
+
+ retval = request_irq(pdata->irq, av8100_intr_handler,
+ IRQF_TRIGGER_RISING, "av8100", adev);
+ if (retval == 0)
+ adev->params.irq_requested = true;
+ else
+ dev_err(adev->dev, "request_irq %d failed %d\n",
+ pdata->irq, retval);
+
+ return retval;
+
+av8100_powerup1_err:
+ av8100_powerdown();
+ return -EFAULT;
+}
+
+static int av8100_powerup2(struct av8100_device *adev)
+{
+ int retval;
+
+ /* ON time & OFF time on 5v HDMI plug detect */
+ retval = av8100_5V_w(adev->params.denc_off_time,
+ adev->params.hdmi_off_time,
+ adev->params.on_time);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return retval;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ av8100_set_state(adev, AV8100_OPMODE_SCAN);
+
+ return 0;
+}
+
+static int register_read_internal(u8 offset, u8 *value)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ i2c = adev->config.client;
+
+ /* Read from register */
+ retval = read_single_byte(i2c, offset, value);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to read the value from av8100 register\n");
+ return -EFAULT;
+ }
+
+ return retval;
+}
+
+static int register_write_internal(u8 offset, u8 value)
+{
+ int retval;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ i2c = adev->config.client;
+
+ /* Write to register */
+ retval = write_single_byte(i2c, offset, value);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
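+/*
+ * Drop back to plug-detect (SCAN) mode: put the chip in standby, release
+ * the APE/DDR OPP requirements and the input clock, then re-enable
+ * interrupts so plug events can still wake the driver.
+ */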
+int av8100_powerscan(void)
+{
+ int retval;
+ struct av8100_device *adev;
+ struct av8100_platform_data *pdata;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ pdata = adev->dev->platform_data;
+
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ if (av8100_status_get().av8100_state > AV8100_OPMODE_SCAN) {
+ dev_dbg(adev->dev, "set to scan mode\n");
+
+ av8100_disable_interrupt();
+
+ /* Stby mode */
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_LOW, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write to av8100 register\n");
+ return retval;
+ }
+
+ /* Remove APE OPP requirement */
+ if (adev->params.opp_requested) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name);
+ adev->params.opp_requested = false;
+ }
+
+ /* Clock disable */
+ if (adev->params.inputclk &&
+ adev->params.inputclk_requested) {
+ clk_disable(adev->params.inputclk);
+ adev->params.inputclk_requested = false;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ av8100_enable_interrupt();
+
+ av8100_set_state(adev, AV8100_OPMODE_SCAN);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_powerscan);
+
+int av8100_powerup(void)
+{
+ int ret = 0;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ if (av8100_status_get().av8100_state == AV8100_OPMODE_UNDEFINED)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state < AV8100_OPMODE_STANDBY) {
+ ret = av8100_powerup1(adev);
+ if (ret) {
+ dev_err(adev->dev, "av8100_powerup1 fail\n");
+ return -EFAULT;
+ }
+ }
+
+ if (av8100_status_get().av8100_state < AV8100_OPMODE_SCAN)
+ ret = av8100_powerup2(adev);
+
+ av8100_enable_interrupt();
+
+ return ret;
+}
+EXPORT_SYMBOL(av8100_powerup);
+
+int av8100_powerdown(void)
+{
+ int retval = 0;
+ struct av8100_device *adev;
+ struct av8100_platform_data *pdata;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EFAULT;
+
+ pdata = adev->dev->platform_data;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ goto av8100_powerdown_end;
+
+ av8100_disable_interrupt();
+
+ if (adev->params.irq_requested)
+ free_irq(pdata->irq, adev);
+ adev->params.irq_requested = false;
+
+ if (pdata->alt_powerupseq) {
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH);
+
+ if (retval)
+ dev_err(adev->dev, "%s reg_wr err\n", __func__);
+ msleep(AV8100_WAITTIME_50MS);
+ }
+
+ /* Remove APE OPP requirement */
+ if (adev->params.opp_requested) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name);
+ adev->params.opp_requested = false;
+ }
+
+ /* Clock disable */
+ if (adev->params.inputclk && adev->params.inputclk_requested) {
+ clk_disable(adev->params.inputclk);
+ adev->params.inputclk_requested = false;
+ }
+
+ av8100_set_state(adev, AV8100_OPMODE_SHUTDOWN);
+
+ gpio_set_value_cansleep(pdata->reset, 0);
+
+ /* Regulator disable */
+ if ((adev->params.regulator_pwr) &&
+ (adev->params.regulator_requested)) {
+ dev_dbg(adev->dev, "regulator_disable\n");
+ regulator_disable(adev->params.regulator_pwr);
+ adev->params.regulator_requested = false;
+ }
+
+ if (pdata->alt_powerupseq)
+ mdelay(AV8100_WAITTIME_5MS);
+
+av8100_powerdown_end:
+ return retval;
+}
+EXPORT_SYMBOL(av8100_powerdown);
+
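+/*
+ * Download the chip firmware over the chosen interface (only I2C is
+ * implemented; DSI is a TODO): stream the image in 15-byte chunks to the
+ * firmware download entry register, verify it with an XOR checksum read
+ * back from the chip, then wait for the embedded microcontroller to
+ * report ready.
+ */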
+int av8100_download_firmware(enum interface_type if_type)
+{
+ int retval;
+ int temp = 0x0;
+ int increment = 15;
+ int size = 0x0;
+ char val = 0x0;
+ char checksum = 0;
+ int cnt;
+ int cnt_max;
+ struct i2c_client *i2c;
+ u8 uc;
+ u8 fdl;
+ u8 hld;
+ u8 wa;
+ u8 ra;
+ struct av8100_platform_data *pdata;
+ const struct firmware *fw_file;
+ u8 *fw_buff;
+ int fw_bytes;
+ struct av8100_device *adev;
+ struct av8100_status status;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ LOCK_AV8100_FWDL;
+
+ status = av8100_status_get();
+ if (status.av8100_state <= AV8100_OPMODE_SHUTDOWN) {
+ retval = -EINVAL;
+ goto av8100_download_firmware_err2;
+ }
+
+ if (status.av8100_state >= AV8100_OPMODE_INIT) {
+ dev_dbg(adev->dev, "FW already ok\n");
+ retval = 0;
+ goto av8100_download_firmware_err2;
+ }
+
+ av8100_set_state(adev, AV8100_OPMODE_INIT);
+
+ pdata = adev->dev->platform_data;
+
+ /* Request firmware */
+ if (request_firmware(&fw_file,
+ AV8100_FW_FILENAME,
+ adev->dev)) {
+ dev_err(adev->dev, "fw request failed\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err2;
+ }
+
+ /* Master clock timing, running */
+ retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW,
+ AV8100_STANDBY_STBY_HIGH, pdata->mclk_freq);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ goto av8100_download_firmware_err;
+ }
+
+ mdelay(AV8100_WAITTIME_1MS);
+
+ /* Clock enable */
+ if (adev->params.inputclk &&
+ adev->params.inputclk_requested == false) {
+ if (clk_enable(adev->params.inputclk)) {
+ dev_err(adev->dev, "inputclk en failed\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ adev->params.inputclk_requested = true;
+ }
+
+ /* Request 100% APE OPP */
+ if (adev->params.opp_requested == false) {
+ if (prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name, 100)) {
+ dev_err(adev->dev, "APE OPP 100 failed\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+ if (prcmu_qos_add_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name, 100)) {
+ dev_err(adev->dev, "DDR OPP 100 failed\n");
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ adev->params.opp_requested = true;
+ }
+
+ msleep(AV8100_WAITTIME_10MS);
+
+ /* Prepare firmware data */
+ fw_bytes = fw_file->size;
+ fw_buff = (u8 *)fw_file->data;
+ dev_dbg(adev->dev, "fw size:%d\n", fw_bytes);
+
+ i2c = adev->config.client;
+
+ /* Enable firmware download */
+ retval = av8100_reg_gen_ctrl_w(
+ AV8100_GENERAL_CONTROL_FDL_HIGH,
+ AV8100_GENERAL_CONTROL_HLD_HIGH,
+ AV8100_GENERAL_CONTROL_WA_LOW,
+ AV8100_GENERAL_CONTROL_RA_LOW);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ retval = av8100_reg_gen_ctrl_r(&fdl, &hld, &wa, &ra);
+ if (retval) {
+ dev_err(adev->dev,
+ "Failed to read the value from av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ } else {
+ dev_dbg(adev->dev, "GENERAL_CONTROL_REG register fdl:%d "
+ "hld:%d wa:%d ra:%d\n", fdl, hld, wa, ra);
+ }
+
+ LOCK_AV8100_HW;
+
+ temp = fw_bytes % increment;
+ for (size = 0; size < (fw_bytes - temp); size += increment) {
+ if (if_type == I2C_INTERFACE) {
+ retval = write_multi_byte(i2c,
+ AV8100_FIRMWARE_DOWNLOAD_ENTRY, fw_buff + size,
+ increment);
+ if (retval) {
+ dev_dbg(adev->dev, "Failed to download the "
+ "av8100 firmware\n");
+ UNLOCK_AV8100_HW;
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+ } else if (if_type == DSI_INTERFACE) {
+ dev_dbg(adev->dev,
+ "DSI_INTERFACE is currently not supported\n");
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ } else {
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ }
+ }
+
+ /* Transfer last firmware bytes */
+ if (if_type == I2C_INTERFACE) {
+ retval = write_multi_byte(i2c,
+ AV8100_FIRMWARE_DOWNLOAD_ENTRY, fw_buff + size, temp);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to download the av8100 firmware\n");
+ UNLOCK_AV8100_HW;
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+ } else if (if_type == DSI_INTERFACE) {
+ /* TODO: Add support for DSI firmware download */
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ } else {
+ UNLOCK_AV8100_HW;
+ retval = -EINVAL;
+ goto av8100_download_firmware_err;
+ }
+
+ /* Compute XOR checksum over the image to verify the transfer */
+ for (size = 0; size < fw_bytes; size++)
+ checksum = checksum ^ fw_buff[size];
+
+ UNLOCK_AV8100_HW;
+
+ retval = av8100_reg_fw_dl_entry_r(&val);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to read the value from the av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ dev_dbg(adev->dev, "checksum:%x,val:%x\n", checksum, val);
+
+	if (checksum != val) {
+		dev_dbg(adev->dev,
+			"Firmware download failed: checksum mismatch "
+			"(calculated:%x read:%x)\n", checksum, val);
+		retval = -EFAULT;
+		goto av8100_download_firmware_err;
+	} else {
+		dev_dbg(adev->dev, "Firmware download succeeded\n");
+	}
+
+ /* Set to idle mode */
+	retval = av8100_reg_gen_ctrl_w(AV8100_GENERAL_CONTROL_FDL_LOW,
+		AV8100_GENERAL_CONTROL_HLD_LOW, AV8100_GENERAL_CONTROL_WA_LOW,
+		AV8100_GENERAL_CONTROL_RA_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to the av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+	/* Wait for the internal microcontroller to become ready */
+ cnt = 0;
+	cnt_max = ARRAY_SIZE(waittime_retry);
+ retval = av8100_reg_gen_status_r(NULL, NULL, NULL, &uc,
+ NULL, NULL);
+ while ((retval == 0) && (uc != 0x1) && (cnt < cnt_max)) {
+ mdelay(waittime_retry[cnt]);
+ retval = av8100_reg_gen_status_r(NULL, NULL, NULL,
+ &uc, NULL, NULL);
+ cnt++;
+ }
+ dev_dbg(adev->dev, "av8100 fwdl cnt:%d\n", cnt);
+
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to read the value from the av8100 register\n");
+ retval = -EFAULT;
+ goto av8100_download_firmware_err;
+ }
+
+ if (uc != 0x1)
+ dev_dbg(adev->dev, "UC is not ready\n");
+
+ release_firmware(fw_file);
+
+ if (adev->chip_version != 1) {
+ char *cut_str;
+
+ /* Get cut version */
+ retval = read_single_byte(i2c, AV8100_CUTVER_OFFSET, &val);
+ if (retval) {
+ dev_err(adev->dev, "Read cut ver failed\n");
+			goto av8100_download_firmware_err2;
+ }
+
+ switch (val) {
+ case 0x00:
+ cut_str = CUT_STR_0;
+ break;
+ case 0x01:
+ cut_str = CUT_STR_1;
+ break;
+ case 0x03:
+ cut_str = CUT_STR_3;
+ break;
+ case 0x30:
+ cut_str = CUT_STR_30;
+ break;
+ default:
+ cut_str = CUT_STR_UNKNOWN;
+ break;
+ }
+ dev_dbg(adev->dev, "Cut ver %d %s\n", val, cut_str);
+ }
+
+ av8100_set_state(adev, AV8100_OPMODE_IDLE);
+
+ UNLOCK_AV8100_FWDL;
+ return 0;
+
+av8100_download_firmware_err:
+ release_firmware(fw_file);
+
+	/* Remove APE and DDR OPP requirements */
+ if (adev->params.opp_requested) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)adev->miscdev.name);
+ prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP,
+ (char *)adev->miscdev.name);
+ adev->params.opp_requested = false;
+ }
+
+ /* Clock disable */
+ if (adev->params.inputclk && adev->params.inputclk_requested) {
+ clk_disable(adev->params.inputclk);
+ adev->params.inputclk_requested = false;
+ }
+
+av8100_download_firmware_err2:
+ UNLOCK_AV8100_FWDL;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_download_firmware);
+
+int av8100_disable_interrupt(void)
+{
+ int retval;
+ u8 hpdm = 0;
+ u8 cpdm = 0;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ if (!adev->params.ints_enabled)
+ return 0;
+
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ retval = av8100_reg_gen_int_mask_w(
+ AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_CECM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_HDCPM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_UOVBM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
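+	/*
+	 * av8100_reg_stby_int_mask_w() below overwrites params.hpdm and
+	 * params.cpdm as a side effect; save the configured values here
+	 * and restore them afterwards so av8100_enable_interrupt() can
+	 * reinstate them later.
+	 */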
+ hpdm = adev->params.hpdm;
+ cpdm = adev->params.cpdm;
+
+ retval = av8100_reg_stby_int_mask_w(
+ AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW,
+ AV8100_STANDBY_INTERRUPT_MASK_CPDM_LOW,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ adev->params.hpdm = hpdm;
+ adev->params.cpdm = cpdm;
+ adev->params.ints_enabled = false;
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_disable_interrupt);
+
+int av8100_enable_interrupt(void)
+{
+ int retval;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ if (adev->params.ints_enabled)
+ return 0;
+
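+	/*
+	 * Re-enable interrupts using the mask values stored in
+	 * adev->params (cecm, hdcpm, uovbm, hpdm, cpdm), which
+	 * av8100_disable_interrupt() preserved.
+	 */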
+ retval = av8100_reg_stby_pend_int_w(
+ AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW,
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ retval = av8100_reg_gen_int_mask_w(
+ AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW,
+ AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW,
+ adev->params.cecm,
+ adev->params.hdcpm,
+ adev->params.uovbm,
+ AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ retval = av8100_reg_stby_int_mask_w(
+ adev->params.hpdm,
+ adev->params.cpdm,
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT,
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ return -EFAULT;
+ }
+
+ adev->params.ints_enabled = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_enable_interrupt);
+
+int av8100_reg_stby_w(
+ u8 cpd, u8 stby, u8 mclkrng)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_STANDBY_CPD(cpd) | AV8100_STANDBY_STBY(stby) |
+ AV8100_STANDBY_MCLKRNG(mclkrng);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_STANDBY, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_w);
+
+static int av8100_5V_w(u8 denc_off, u8 hdmi_off, u8 on)
+{
+ u8 val;
+ int retval;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+	/* Set register value.
+	 * chip_version == 1 has one common off time;
+	 * chip_version > 1 supports separate off times for HDMI and TV-out. */
+ if (adev->chip_version == 1)
+ val = AV8100_HDMI_5_VOLT_TIME_OFF_TIME(hdmi_off) |
+ AV8100_HDMI_5_VOLT_TIME_ON_TIME(on);
+ else
+ val = AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME(denc_off) |
+ AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME(hdmi_off) |
+ AV8100_HDMI_5_VOLT_TIME_ON_TIME(on);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_HDMI_5_VOLT_TIME, val);
+
+ UNLOCK_AV8100_HW;
+
+ return retval;
+}
+
+int av8100_reg_hdmi_5_volt_time_w(u8 denc_off, u8 hdmi_off, u8 on)
+{
+ int retval;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ retval = av8100_5V_w(denc_off, hdmi_off, on);
+
+ /* Set vars */
+ if (adev->chip_version > 1)
+ adev->params.denc_off_time = denc_off;
+
+ adev->params.hdmi_off_time = hdmi_off;
+ if (on)
+ adev->params.on_time = on;
+
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_hdmi_5_volt_time_w);
+
+int av8100_reg_stby_int_mask_w(
+ u8 hpdm, u8 cpdm, u8 stbygpiocfg, u8 ipol)
+{
+ int retval;
+ u8 val;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_STANDBY_INTERRUPT_MASK_HPDM(hpdm) |
+ AV8100_STANDBY_INTERRUPT_MASK_CPDM(cpdm) |
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG(stbygpiocfg) |
+ AV8100_STANDBY_INTERRUPT_MASK_IPOL(ipol);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_STANDBY_INTERRUPT_MASK, val);
+
+ adev->params.hpdm = hpdm;
+ adev->params.cpdm = cpdm;
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_int_mask_w);
+
+int av8100_reg_stby_pend_int_w(
+ u8 hpdi, u8 cpdi, u8 oni, u8 bpdig)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_STANDBY_PENDING_INTERRUPT_HPDI(hpdi) |
+ AV8100_STANDBY_PENDING_INTERRUPT_CPDI(cpdi) |
+ AV8100_STANDBY_PENDING_INTERRUPT_ONI(oni) |
+ AV8100_STANDBY_PENDING_INTERRUPT_BPDIG(bpdig);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_STANDBY_PENDING_INTERRUPT, val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_pend_int_w);
+
+int av8100_reg_gen_int_mask_w(
+ u8 eocm, u8 vsim, u8 vsom, u8 cecm, u8 hdcpm, u8 uovbm, u8 tem)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GENERAL_INTERRUPT_MASK_EOCM(eocm) |
+ AV8100_GENERAL_INTERRUPT_MASK_VSIM(vsim) |
+ AV8100_GENERAL_INTERRUPT_MASK_VSOM(vsom) |
+ AV8100_GENERAL_INTERRUPT_MASK_CECM(cecm) |
+ AV8100_GENERAL_INTERRUPT_MASK_HDCPM(hdcpm) |
+ AV8100_GENERAL_INTERRUPT_MASK_UOVBM(uovbm) |
+ AV8100_GENERAL_INTERRUPT_MASK_TEM(tem);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GENERAL_INTERRUPT_MASK, val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_mask_w);
+
+int av8100_reg_gen_int_w(
+ u8 eoci, u8 vsii, u8 vsoi, u8 ceci, u8 hdcpi, u8 uovbi)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GENERAL_INTERRUPT_EOCI(eoci) |
+ AV8100_GENERAL_INTERRUPT_VSII(vsii) |
+ AV8100_GENERAL_INTERRUPT_VSOI(vsoi) |
+ AV8100_GENERAL_INTERRUPT_CECI(ceci) |
+ AV8100_GENERAL_INTERRUPT_HDCPI(hdcpi) |
+ AV8100_GENERAL_INTERRUPT_UOVBI(uovbi);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GENERAL_INTERRUPT, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_w);
+
+int av8100_reg_gpio_conf_w(
+ u8 dat3dir, u8 dat3val, u8 dat2dir, u8 dat2val, u8 dat1dir,
+ u8 dat1val, u8 ucdbg)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GPIO_CONFIGURATION_DAT3DIR(dat3dir) |
+ AV8100_GPIO_CONFIGURATION_DAT3VAL(dat3val) |
+ AV8100_GPIO_CONFIGURATION_DAT2DIR(dat2dir) |
+ AV8100_GPIO_CONFIGURATION_DAT2VAL(dat2val) |
+ AV8100_GPIO_CONFIGURATION_DAT1DIR(dat1dir) |
+ AV8100_GPIO_CONFIGURATION_DAT1VAL(dat1val) |
+ AV8100_GPIO_CONFIGURATION_UCDBG(ucdbg);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GPIO_CONFIGURATION, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gpio_conf_w);
+
+int av8100_reg_gen_ctrl_w(
+ u8 fdl, u8 hld, u8 wa, u8 ra)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_GENERAL_CONTROL_FDL(fdl) |
+ AV8100_GENERAL_CONTROL_HLD(hld) |
+ AV8100_GENERAL_CONTROL_WA(wa) |
+ AV8100_GENERAL_CONTROL_RA(ra);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_GENERAL_CONTROL, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_ctrl_w);
+
+int av8100_reg_fw_dl_entry_w(
+ u8 mbyte_code_entry)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Set register value */
+ val = AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY(
+ mbyte_code_entry);
+
+ /* Write to register */
+ retval = register_write_internal(AV8100_FIRMWARE_DOWNLOAD_ENTRY, val);
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_fw_dl_entry_w);
+
+int av8100_reg_w(
+ u8 offset, u8 value)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ i2c = adev->config.client;
+
+ /* Write to register */
+ retval = write_single_byte(i2c, offset, value);
+ if (retval) {
+ dev_dbg(adev->dev,
+ "Failed to write the value to av8100 register\n");
+ UNLOCK_AV8100_HW;
+ return -EFAULT;
+ }
+
+ UNLOCK_AV8100_HW;
+ return 0;
+}
+EXPORT_SYMBOL(av8100_reg_w);
+
+int av8100_reg_stby_r(
+ u8 *cpd, u8 *stby, u8 *hpds, u8 *cpds, u8 *mclkrng)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_STANDBY, &val);
+
+ /* Set return params */
+ if (cpd)
+ *cpd = AV8100_STANDBY_CPD_GET(val);
+ if (stby)
+ *stby = AV8100_STANDBY_STBY_GET(val);
+ if (hpds)
+ *hpds = AV8100_STANDBY_HPDS_GET(val);
+ if (cpds)
+ *cpds = AV8100_STANDBY_CPDS_GET(val);
+ if (mclkrng)
+ *mclkrng = AV8100_STANDBY_MCLKRNG_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_r);
+
+int av8100_reg_hdmi_5_volt_time_r(
+ u8 *denc_off_time, u8 *hdmi_off_time, u8 *on_time)
+{
+ int retval;
+ u8 val;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_HDMI_5_VOLT_TIME, &val);
+
+ /* Set return params */
+ if (adev->chip_version == 1) {
+ if (denc_off_time)
+ *denc_off_time = 0;
+ if (hdmi_off_time)
+ *hdmi_off_time =
+ AV8100_HDMI_5_VOLT_TIME_OFF_TIME_GET(val);
+ } else {
+ if (denc_off_time)
+ *denc_off_time =
+ AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_GET(val);
+ if (hdmi_off_time)
+ *hdmi_off_time =
+ AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_GET(val);
+ }
+
+ if (on_time)
+ *on_time = AV8100_HDMI_5_VOLT_TIME_ON_TIME_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_hdmi_5_volt_time_r);
+
+int av8100_reg_stby_int_mask_r(
+ u8 *hpdm, u8 *cpdm, u8 *stbygpiocfg, u8 *ipol)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_STANDBY_INTERRUPT_MASK, &val);
+
+ /* Set return params */
+ if (hpdm)
+ *hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_GET(val);
+ if (cpdm)
+ *cpdm = AV8100_STANDBY_INTERRUPT_MASK_CPDM_GET(val);
+ if (stbygpiocfg)
+ *stbygpiocfg =
+ AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_GET(val);
+ if (ipol)
+ *ipol = AV8100_STANDBY_INTERRUPT_MASK_IPOL_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_int_mask_r);
+
+int av8100_reg_stby_pend_int_r(
+ u8 *hpdi, u8 *cpdi, u8 *oni, u8 *sid)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_STANDBY_PENDING_INTERRUPT,
+ &val);
+
+ /* Set return params */
+ if (hpdi)
+ *hpdi = AV8100_STANDBY_PENDING_INTERRUPT_HPDI_GET(val);
+ if (cpdi)
+ *cpdi = AV8100_STANDBY_PENDING_INTERRUPT_CPDI_GET(val);
+ if (oni)
+ *oni = AV8100_STANDBY_PENDING_INTERRUPT_ONI_GET(val);
+ if (sid)
+ *sid = AV8100_STANDBY_PENDING_INTERRUPT_SID_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_stby_pend_int_r);
+
+int av8100_reg_gen_int_mask_r(
+ u8 *eocm,
+ u8 *vsim,
+ u8 *vsom,
+ u8 *cecm,
+ u8 *hdcpm,
+ u8 *uovbm,
+ u8 *tem)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_INTERRUPT_MASK, &val);
+
+ /* Set return params */
+ if (eocm)
+ *eocm = AV8100_GENERAL_INTERRUPT_MASK_EOCM_GET(val);
+ if (vsim)
+ *vsim = AV8100_GENERAL_INTERRUPT_MASK_VSIM_GET(val);
+ if (vsom)
+ *vsom = AV8100_GENERAL_INTERRUPT_MASK_VSOM_GET(val);
+ if (cecm)
+ *cecm = AV8100_GENERAL_INTERRUPT_MASK_CECM_GET(val);
+ if (hdcpm)
+ *hdcpm = AV8100_GENERAL_INTERRUPT_MASK_HDCPM_GET(val);
+ if (uovbm)
+ *uovbm = AV8100_GENERAL_INTERRUPT_MASK_UOVBM_GET(val);
+ if (tem)
+ *tem = AV8100_GENERAL_INTERRUPT_MASK_TEM_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_mask_r);
+
+int av8100_reg_gen_int_r(
+ u8 *eoci,
+ u8 *vsii,
+ u8 *vsoi,
+ u8 *ceci,
+ u8 *hdcpi,
+ u8 *uovbi,
+ u8 *tei)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_INTERRUPT, &val);
+
+ /* Set return params */
+ if (eoci)
+ *eoci = AV8100_GENERAL_INTERRUPT_EOCI_GET(val);
+ if (vsii)
+ *vsii = AV8100_GENERAL_INTERRUPT_VSII_GET(val);
+ if (vsoi)
+ *vsoi = AV8100_GENERAL_INTERRUPT_VSOI_GET(val);
+ if (ceci)
+ *ceci = AV8100_GENERAL_INTERRUPT_CECI_GET(val);
+ if (hdcpi)
+ *hdcpi = AV8100_GENERAL_INTERRUPT_HDCPI_GET(val);
+ if (uovbi)
+ *uovbi = AV8100_GENERAL_INTERRUPT_UOVBI_GET(val);
+ if (tei)
+ *tei = AV8100_GENERAL_INTERRUPT_TEI_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_int_r);
+
+int av8100_reg_gen_status_r(
+ u8 *cectxerr,
+ u8 *cecrec,
+ u8 *cectrx,
+ u8 *uc,
+ u8 *onuvb,
+ u8 *hdcps)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_STATUS, &val);
+
+ /* Set return params */
+ if (cectxerr)
+ *cectxerr = AV8100_GENERAL_STATUS_CECTXERR_GET(val);
+ if (cecrec)
+ *cecrec = AV8100_GENERAL_STATUS_CECREC_GET(val);
+ if (cectrx)
+ *cectrx = AV8100_GENERAL_STATUS_CECTRX_GET(val);
+ if (uc)
+ *uc = AV8100_GENERAL_STATUS_UC_GET(val);
+ if (onuvb)
+ *onuvb = AV8100_GENERAL_STATUS_ONUVB_GET(val);
+ if (hdcps)
+ *hdcps = AV8100_GENERAL_STATUS_HDCPS_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_status_r);
+
+int av8100_reg_gpio_conf_r(
+ u8 *dat3dir,
+ u8 *dat3val,
+ u8 *dat2dir,
+ u8 *dat2val,
+ u8 *dat1dir,
+ u8 *dat1val,
+ u8 *ucdbg)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GPIO_CONFIGURATION, &val);
+
+ /* Set return params */
+ if (dat3dir)
+ *dat3dir = AV8100_GPIO_CONFIGURATION_DAT3DIR_GET(val);
+ if (dat3val)
+ *dat3val = AV8100_GPIO_CONFIGURATION_DAT3VAL_GET(val);
+ if (dat2dir)
+ *dat2dir = AV8100_GPIO_CONFIGURATION_DAT2DIR_GET(val);
+ if (dat2val)
+ *dat2val = AV8100_GPIO_CONFIGURATION_DAT2VAL_GET(val);
+ if (dat1dir)
+ *dat1dir = AV8100_GPIO_CONFIGURATION_DAT1DIR_GET(val);
+ if (dat1val)
+ *dat1val = AV8100_GPIO_CONFIGURATION_DAT1VAL_GET(val);
+ if (ucdbg)
+ *ucdbg = AV8100_GPIO_CONFIGURATION_UCDBG_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gpio_conf_r);
+
+int av8100_reg_gen_ctrl_r(
+ u8 *fdl,
+ u8 *hld,
+ u8 *wa,
+ u8 *ra)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_GENERAL_CONTROL, &val);
+ /* Set return params */
+ if (fdl)
+ *fdl = AV8100_GENERAL_CONTROL_FDL_GET(val);
+ if (hld)
+ *hld = AV8100_GENERAL_CONTROL_HLD_GET(val);
+ if (wa)
+ *wa = AV8100_GENERAL_CONTROL_WA_GET(val);
+ if (ra)
+ *ra = AV8100_GENERAL_CONTROL_RA_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_gen_ctrl_r);
+
+int av8100_reg_fw_dl_entry_r(
+ u8 *mbyte_code_entry)
+{
+ int retval;
+ u8 val;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ /* Read from register */
+ retval = register_read_internal(AV8100_FIRMWARE_DOWNLOAD_ENTRY, &val);
+
+ /* Set return params */
+ if (mbyte_code_entry)
+ *mbyte_code_entry =
+ AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_GET(val);
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_reg_fw_dl_entry_r);
+
+int av8100_reg_r(
+ u8 offset,
+ u8 *value)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ i2c = adev->config.client;
+
+	/* Read from register */
+	retval = read_single_byte(i2c, offset, value);
+	if (retval) {
+		dev_dbg(adev->dev,
+			"Failed to read the value from av8100 register\n");
+		retval = -EFAULT;
+	}
+
+	UNLOCK_AV8100_HW;
+	return retval;
+}
+EXPORT_SYMBOL(av8100_reg_r);
+
+int av8100_conf_get(enum av8100_command_type command_type,
+ union av8100_configuration *config)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state == AV8100_OPMODE_UNDEFINED)
+ return -EINVAL;
+
+	/* Copy the stored configuration for the given command type into
+	 * the caller's struct */
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ memcpy(&config->video_input_format,
+ &adev->config.hdmi_video_input_cmd,
+ sizeof(struct av8100_video_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ memcpy(&config->audio_input_format,
+ &adev->config.hdmi_audio_input_cmd,
+ sizeof(struct av8100_audio_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ memcpy(&config->video_output_format,
+ &adev->config.hdmi_video_output_cmd,
+ sizeof(struct av8100_video_output_format_cmd));
+ break;
+
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ memcpy(&config->video_scaling_format,
+ &adev->config.hdmi_video_scaling_cmd,
+ sizeof(struct av8100_video_scaling_format_cmd));
+ break;
+
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ config->color_transform = adev->config.color_transform;
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ memcpy(&config->cec_message_write_format,
+ &adev->config.hdmi_cec_message_write_cmd,
+ sizeof(struct av8100_cec_message_write_format_cmd));
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ memcpy(&config->cec_message_read_back_format,
+ &adev->config.hdmi_cec_message_read_back_cmd,
+ sizeof(struct av8100_cec_message_read_back_format_cmd));
+ break;
+
+ case AV8100_COMMAND_DENC:
+ memcpy(&config->denc_format, &adev->config.hdmi_denc_cmd,
+ sizeof(struct av8100_denc_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDMI:
+ memcpy(&config->hdmi_format, &adev->config.hdmi_cmd,
+ sizeof(struct av8100_hdmi_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ memcpy(&config->hdcp_send_key_format,
+ &adev->config.hdmi_hdcp_send_key_cmd,
+ sizeof(struct av8100_hdcp_send_key_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ memcpy(&config->hdcp_management_format,
+ &adev->config.hdmi_hdcp_management_format_cmd,
+ sizeof(struct av8100_hdcp_management_format_cmd));
+ break;
+
+ case AV8100_COMMAND_INFOFRAMES:
+ memcpy(&config->infoframes_format,
+ &adev->config.hdmi_infoframes_cmd,
+ sizeof(struct av8100_infoframes_format_cmd));
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ memcpy(&config->edid_section_readback_format,
+ &adev->config.hdmi_edid_section_readback_cmd,
+ sizeof(struct
+ av8100_edid_section_readback_format_cmd));
+ break;
+
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ memcpy(&config->pattern_generator_format,
+ &adev->config.hdmi_pattern_generator_cmd,
+ sizeof(struct av8100_pattern_generator_format_cmd));
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ memcpy(&config->fuse_aes_key_format,
+ &adev->config.hdmi_fuse_aes_key_cmd,
+ sizeof(struct av8100_fuse_aes_key_format_cmd));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_conf_get);
+
+int av8100_conf_prep(enum av8100_command_type command_type,
+ union av8100_configuration *config)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!config || !adev)
+ return -EINVAL;
+
+	/* Copy the caller's configuration into the corresponding stored
+	 * data struct for the given command type */
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ memcpy(&adev->config.hdmi_video_input_cmd,
+ &config->video_input_format,
+ sizeof(struct av8100_video_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ memcpy(&adev->config.hdmi_audio_input_cmd,
+ &config->audio_input_format,
+ sizeof(struct av8100_audio_input_format_cmd));
+ break;
+
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ memcpy(&adev->config.hdmi_video_output_cmd,
+ &config->video_output_format,
+ sizeof(struct av8100_video_output_format_cmd));
+
+ /* Set params that depend on video output */
+ av8100_config_video_output_dep(adev->config.
+ hdmi_video_output_cmd.video_output_cea_vesa);
+ break;
+
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ memcpy(&adev->config.hdmi_video_scaling_cmd,
+ &config->video_scaling_format,
+ sizeof(struct av8100_video_scaling_format_cmd));
+ break;
+
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ adev->config.color_transform = config->color_transform;
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ memcpy(&adev->config.hdmi_cec_message_write_cmd,
+ &config->cec_message_write_format,
+ sizeof(struct av8100_cec_message_write_format_cmd));
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ memcpy(&adev->config.hdmi_cec_message_read_back_cmd,
+ &config->cec_message_read_back_format,
+ sizeof(struct av8100_cec_message_read_back_format_cmd));
+ break;
+
+ case AV8100_COMMAND_DENC:
+ memcpy(&adev->config.hdmi_denc_cmd, &config->denc_format,
+ sizeof(struct av8100_denc_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDMI:
+ memcpy(&adev->config.hdmi_cmd, &config->hdmi_format,
+ sizeof(struct av8100_hdmi_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ memcpy(&adev->config.hdmi_hdcp_send_key_cmd,
+ &config->hdcp_send_key_format,
+ sizeof(struct av8100_hdcp_send_key_format_cmd));
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ memcpy(&adev->config.hdmi_hdcp_management_format_cmd,
+ &config->hdcp_management_format,
+ sizeof(struct av8100_hdcp_management_format_cmd));
+ break;
+
+ case AV8100_COMMAND_INFOFRAMES:
+ memcpy(&adev->config.hdmi_infoframes_cmd,
+ &config->infoframes_format,
+ sizeof(struct av8100_infoframes_format_cmd));
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ memcpy(&adev->config.hdmi_edid_section_readback_cmd,
+ &config->edid_section_readback_format,
+ sizeof(struct
+ av8100_edid_section_readback_format_cmd));
+ break;
+
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ memcpy(&adev->config.hdmi_pattern_generator_cmd,
+ &config->pattern_generator_format,
+ sizeof(struct av8100_pattern_generator_format_cmd));
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ memcpy(&adev->config.hdmi_fuse_aes_key_cmd,
+ &config->fuse_aes_key_format,
+ sizeof(struct av8100_fuse_aes_key_format_cmd));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(av8100_conf_prep);
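+/*
+ * Typical usage (illustrative sketch, not taken from in-tree callers):
+ * read back the stored configuration, modify it, prepare it, then write
+ * it to the chip:
+ *
+ *	union av8100_configuration config;
+ *
+ *	av8100_conf_get(AV8100_COMMAND_HDMI, &config);
+ *	config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ *	av8100_conf_prep(AV8100_COMMAND_HDMI, &config);
+ *	av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL, I2C_INTERFACE);
+ */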
+
+int av8100_conf_w(enum av8100_command_type command_type,
+ u8 *return_buffer_length,
+ u8 *return_buffer, enum interface_type if_type)
+{
+ int retval = 0;
+ u8 cmd_buffer[AV8100_COMMAND_MAX_LENGTH];
+ u32 cmd_length = 0;
+ struct i2c_client *i2c;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ if (return_buffer_length)
+ *return_buffer_length = 0;
+
+ i2c = adev->config.client;
+
+	memset(cmd_buffer, 0, sizeof(cmd_buffer));
+
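+/* Log the symbolic command name; '#_m' stringizes the macro argument. */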
+#define PRNK_MODE(_m) dev_dbg(adev->dev, "cmd: " #_m "\n")
+
+ /* Fill the command buffer with configuration data */
+ switch (command_type) {
+ case AV8100_COMMAND_VIDEO_INPUT_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_VIDEO_INPUT_FORMAT);
+ configuration_video_input_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_AUDIO_INPUT_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_AUDIO_INPUT_FORMAT);
+ configuration_audio_input_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT);
+ configuration_video_output_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_VIDEO_SCALING_FORMAT:
+ PRNK_MODE(AV8100_COMMAND_VIDEO_SCALING_FORMAT);
+ configuration_video_scaling_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_COLORSPACECONVERSION:
+ PRNK_MODE(AV8100_COMMAND_COLORSPACECONVERSION);
+ configuration_colorspace_conversion_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_WRITE:
+ PRNK_MODE(AV8100_COMMAND_CEC_MESSAGE_WRITE);
+ configuration_cec_message_write_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_CEC_MESSAGE_READ_BACK:
+ PRNK_MODE(AV8100_COMMAND_CEC_MESSAGE_READ_BACK);
+ configuration_cec_message_read_get(cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_DENC:
+ PRNK_MODE(AV8100_COMMAND_DENC);
+ configuration_denc_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_HDMI:
+ PRNK_MODE(AV8100_COMMAND_HDMI);
+ configuration_hdmi_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_HDCP_SENDKEY:
+ PRNK_MODE(AV8100_COMMAND_HDCP_SENDKEY);
+ configuration_hdcp_sendkey_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_HDCP_MANAGEMENT:
+ PRNK_MODE(AV8100_COMMAND_HDCP_MANAGEMENT);
+ configuration_hdcp_management_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_INFOFRAMES:
+ PRNK_MODE(AV8100_COMMAND_INFOFRAMES);
+ configuration_infoframe_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_EDID_SECTION_READBACK:
+ PRNK_MODE(AV8100_COMMAND_EDID_SECTION_READBACK);
+ av8100_edid_section_readback_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+ case AV8100_COMMAND_PATTERNGENERATOR:
+ PRNK_MODE(AV8100_COMMAND_PATTERNGENERATOR);
+ configuration_pattern_generator_get(adev, cmd_buffer,
+ &cmd_length);
+ break;
+
+ case AV8100_COMMAND_FUSE_AES_KEY:
+ PRNK_MODE(AV8100_COMMAND_FUSE_AES_KEY);
+ configuration_fuse_aes_key_get(adev, cmd_buffer, &cmd_length);
+ break;
+
+	default:
+		/* Bail out before touching the hardware with an empty
+		 * command buffer */
+		dev_dbg(adev->dev, "Invalid command type\n");
+		return -EFAULT;
+	}
+
+ LOCK_AV8100_HW;
+
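+	/*
+	 * I2C command protocol: write the parameter buffer to the command
+	 * buffer offset, trigger execution by writing the command type to
+	 * the command offset, poll for the first return byte with
+	 * escalating delays, then read back any return data.
+	 */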
+ if (if_type == I2C_INTERFACE) {
+ int cnt = 0;
+ int cnt_max;
+
+ dev_dbg(adev->dev, "av8100_conf_w cmd_type:%02x length:%02x ",
+ command_type, cmd_length);
+ dev_dbg(adev->dev, "buffer: ");
+ while (cnt < cmd_length) {
+ dev_dbg(adev->dev, "%02x ", cmd_buffer[cnt]);
+ cnt++;
+ }
+
+ /* Write the command buffer */
+ retval = write_multi_byte(i2c,
+ AV8100_CMD_BUF_OFFSET, cmd_buffer, cmd_length);
+ if (retval) {
+ UNLOCK_AV8100_HW;
+ return retval;
+ }
+
+ /* Write the command */
+ retval = write_single_byte(i2c, AV8100_COMMAND_OFFSET,
+ command_type);
+ if (retval) {
+ UNLOCK_AV8100_HW;
+ return retval;
+ }
+
+ /* Get the first return byte */
+ mdelay(AV8100_WAITTIME_1MS);
+ cnt = 0;
+		cnt_max = ARRAY_SIZE(waittime_retry);
+ retval = get_command_return_first(i2c, command_type);
+ while (retval && (cnt < cnt_max)) {
+ mdelay(waittime_retry[cnt]);
+ retval = get_command_return_first(i2c, command_type);
+ cnt++;
+ }
+ dev_dbg(adev->dev, "first return cnt:%d\n", cnt);
+
+ if (retval) {
+ UNLOCK_AV8100_HW;
+ return retval;
+ }
+
+ retval = get_command_return_data(i2c, command_type, cmd_buffer,
+ return_buffer_length, return_buffer);
+ } else if (if_type == DSI_INTERFACE) {
+ /* TODO */
+ } else {
+ retval = -EINVAL;
+		dev_dbg(adev->dev, "Invalid interface type\n");
+ }
+
+ if (command_type == AV8100_COMMAND_HDMI) {
+ adev->status.hdmi_on = ((adev->config.hdmi_cmd.
+ hdmi_mode == AV8100_HDMI_ON) &&
+ (adev->config.hdmi_cmd.hdmi_format == AV8100_HDMI));
+ }
+
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_conf_w);
+
+int av8100_conf_w_raw(enum av8100_command_type command_type,
+ u8 buffer_length,
+ u8 *buffer,
+ u8 *return_buffer_length,
+ u8 *return_buffer)
+{
+ int retval = 0;
+ struct i2c_client *i2c;
+ int cnt;
+ int cnt_max;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN)
+ return -EINVAL;
+
+ LOCK_AV8100_HW;
+
+ if (return_buffer_length)
+ *return_buffer_length = 0;
+
+ i2c = adev->config.client;
+
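+	/*
+	 * Raw variant of av8100_conf_w(): the caller supplies a pre-built
+	 * parameter buffer instead of using the stored configuration. The
+	 * same I2C command protocol is used.
+	 */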
+ /* Write the command buffer */
+ retval = write_multi_byte(i2c,
+ AV8100_CMD_BUF_OFFSET, buffer, buffer_length);
+ if (retval)
+ goto av8100_conf_w_raw_out;
+
+ /* Write the command */
+ retval = write_single_byte(i2c, AV8100_COMMAND_OFFSET,
+ command_type);
+ if (retval)
+ goto av8100_conf_w_raw_out;
+
+ /* Get the first return byte */
+ mdelay(AV8100_WAITTIME_1MS);
+ cnt = 0;
+	cnt_max = ARRAY_SIZE(waittime_retry);
+ retval = get_command_return_first(i2c, command_type);
+ while (retval && (cnt < cnt_max)) {
+ mdelay(waittime_retry[cnt]);
+ retval = get_command_return_first(i2c, command_type);
+ cnt++;
+ }
+ dev_dbg(adev->dev, "first return cnt:%d\n", cnt);
+ if (retval)
+ goto av8100_conf_w_raw_out;
+
+ retval = get_command_return_data(i2c, command_type, buffer,
+ return_buffer_length, return_buffer);
+
+av8100_conf_w_raw_out:
+ UNLOCK_AV8100_HW;
+ return retval;
+}
+EXPORT_SYMBOL(av8100_conf_w_raw);
+
+struct av8100_status av8100_status_get(void)
+{
+ struct av8100_status status = {0};
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (adev)
+ return adev->status;
+ else
+ return status;
+}
+EXPORT_SYMBOL(av8100_status_get);
+
+enum av8100_output_CEA_VESA av8100_video_output_format_get(int xres,
+ int yres,
+ int htot,
+ int vtot,
+ int pixelclk,
+ bool interlaced)
+{
+ enum av8100_output_CEA_VESA index = 1;
+ int yres_div = !interlaced ? 1 : 2;
+ int hres_div = 1;
+ long freq1;
+ long freq2;
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+	/*
+	 * The 720x576 interlaced mode needs a divider for hactive and
+	 * htotal, since these parameters are stored twice as large in
+	 * av8100_all_cea, which is used as input to the video input
+	 * config.
+	 */
+ if ((xres == 720) && (yres == 576) && (interlaced == true))
+ hres_div = 2;
+
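+	/*
+	 * Derive an approximate refresh rate from the requested timings
+	 * and compare it against each table entry's rate; a mode matches
+	 * when the active/total dimensions agree and the rates differ by
+	 * less than 2.
+	 */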
+ freq1 = 1000000 / htot * 1000000 / vtot / pixelclk + 1;
+	while (index < ARRAY_SIZE(av8100_all_cea)) {
+ freq2 = av8100_all_cea[index].frequence /
+ av8100_all_cea[index].htotale /
+ av8100_all_cea[index].vtotale;
+
+ dev_dbg(adev->dev, "freq1:%ld freq2:%ld\n", freq1, freq2);
+ if ((xres == av8100_all_cea[index].hactive / hres_div) &&
+ (yres == av8100_all_cea[index].vactive * yres_div) &&
+ (htot == av8100_all_cea[index].htotale / hres_div) &&
+ (vtot == av8100_all_cea[index].vtotale) &&
+ (abs(freq1 - freq2) < 2)) {
+ goto av8100_video_output_format_get_out;
+ }
+ index++;
+ }
+
+av8100_video_output_format_get_out:
+ dev_dbg(adev->dev, "av8100_video_output_format_get %d %d %d %d %d\n",
+ xres, yres, htot, vtot, index);
+ return index;
+}
+EXPORT_SYMBOL(av8100_video_output_format_get);
+
+void av8100_hdmi_event_cb_set(void (*hdmi_ev_cb)(enum av8100_hdmi_event))
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (adev)
+ adev->params.hdmi_ev_cb = hdmi_ev_cb;
+}
+EXPORT_SYMBOL(av8100_hdmi_event_cb_set);
+
+u8 av8100_ver_get(void)
+{
+ struct av8100_device *adev;
+
+ adev = devnr_to_adev(AV8100_DEVNR_DEFAULT);
+ if (!adev)
+ return -EINVAL;
+
+ return adev->chip_version;
+}
+EXPORT_SYMBOL(av8100_ver_get);
+
+static const struct color_conversion_cmd *get_color_transform_cmd(
+ struct av8100_device *adev,
+ enum av8100_color_transform transform)
+{
+ const struct color_conversion_cmd *result;
+
+ switch (transform) {
+ case AV8100_COLOR_TRANSFORM_INDENTITY:
+ result = &col_trans_identity;
+ break;
+ case AV8100_COLOR_TRANSFORM_INDENTITY_CLAMP_YUV:
+ result = &col_trans_identity_clamp_yuv;
+ break;
+ case AV8100_COLOR_TRANSFORM_YUV_TO_RGB:
+ if (adev->chip_version == AV8100_CHIPVER_1)
+ result = &col_trans_yuv_to_rgb_v1;
+ else
+ result = &col_trans_yuv_to_rgb_v2;
+ break;
+ case AV8100_COLOR_TRANSFORM_YUV_TO_DENC:
+ result = &col_trans_yuv_to_denc;
+ break;
+ case AV8100_COLOR_TRANSFORM_RGB_TO_DENC:
+ result = &col_trans_rgb_to_denc;
+ break;
+ default:
+ dev_warn(adev->dev, "Unknown color space transform\n");
+ result = &col_trans_identity;
+ break;
+ }
+ return result;
+}
+
+static int av8100_open(struct inode *inode, struct file *filp)
+{
+ pr_debug("%s\n", __func__);
+ return 0;
+}
+
+static int av8100_release(struct inode *inode, struct file *filp)
+{
+ pr_debug("%s\n", __func__);
+ return 0;
+}
+
+static long av8100_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return 0;
+}
+
+int av8100_device_register(struct av8100_device *adev)
+{
+ adev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ adev->miscdev.name = "av8100";
+ adev->miscdev.fops = &av8100_fops;
+
+ if (misc_register(&adev->miscdev)) {
+ pr_err("av8100 misc_register failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int av8100_init_device(struct av8100_device *adev, struct device *dev)
+{
+ adev->dev = dev;
+
+ if (av8100_config_init(adev)) {
+ dev_info(dev, "av8100_config_init failed\n");
+ return -EFAULT;
+ }
+
+ if (av8100_params_init(adev)) {
+ dev_info(dev, "av8100_params_init failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int __devinit av8100_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct av8100_platform_data *pdata = i2c_client->dev.platform_data;
+ struct device *dev;
+ struct av8100_device *adev;
+
+	dev = &i2c_client->dev;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	if (!pdata) {
+		dev_err(dev, "%s: No platform data\n", __func__);
+		return -EINVAL;
+	}
+
+ /* Allocate device data */
+ adev = kzalloc(sizeof(struct av8100_device), GFP_KERNEL);
+ if (!adev) {
+ dev_info(dev, "%s: Alloc failure\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Add to list */
+ list_add_tail(&adev->list, &av8100_device_list);
+
+ av8100_device_register(adev);
+
+ av8100_init_device(adev, dev);
+
+ av8100_set_state(adev, AV8100_OPMODE_UNDEFINED);
+
+ if (!i2c_check_functionality(i2c_client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ ret = -ENODEV;
+ dev_info(dev, "av8100 i2c_check_functionality failed\n");
+ goto err1;
+ }
+
+ init_waitqueue_head(&adev->event);
+
+ adev->config.client = i2c_client;
+ adev->config.id = (struct i2c_device_id *) id;
+ i2c_set_clientdata(i2c_client, &adev->config);
+
+ kthread_run(av8100_thread, adev, "av8100_thread");
+
+ /* Get regulator resource */
+ if (pdata->regulator_pwr_id) {
+ adev->params.regulator_pwr = regulator_get(dev,
+ pdata->regulator_pwr_id);
+ if (IS_ERR(adev->params.regulator_pwr)) {
+ ret = PTR_ERR(adev->params.regulator_pwr);
+ dev_warn(dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_pwr_id);
+ adev->params.regulator_pwr = NULL;
+ goto err1;
+ }
+ }
+
+ /* Get clock resource */
+ if (pdata->inputclk_id) {
+ adev->params.inputclk = clk_get(NULL, pdata->inputclk_id);
+ if (IS_ERR(adev->params.inputclk)) {
+ adev->params.inputclk = NULL;
+ dev_warn(dev, "%s: Failed to get clock '%s'\n",
+ __func__, pdata->inputclk_id);
+ }
+ }
+
+ av8100_set_state(adev, AV8100_OPMODE_SHUTDOWN);
+
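+	/*
+	 * Power up briefly to read the chip version; the device is powered
+	 * down again below (the success path also reaches err2).
+	 */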
+ if (av8100_powerup1(adev)) {
+ dev_err(adev->dev, "av8100_powerup1 fail\n");
+ ret = -EFAULT;
+ goto err1;
+ }
+
+ /* Obtain the chip version */
+ ret = av8100_reg_stby_pend_int_r(NULL, NULL, NULL,
+ &adev->chip_version);
+ if (ret) {
+ dev_err(adev->dev, "Failed to read chip version\n");
+ goto err2;
+ }
+
+ dev_info(adev->dev, "chip version:%d\n", adev->chip_version);
+
+ switch (adev->chip_version) {
+ case AV8100_CHIPVER_1:
+ case AV8100_CHIPVER_2:
+ break;
+
+ default:
+ dev_err(adev->dev, "Unsupported chip version:%d\n",
+ adev->chip_version);
+ ret = -EINVAL;
+ goto err2;
+ }
+err2:
+ (void) av8100_powerdown();
+err1:
+ return ret;
+}
+
+static int __devexit av8100_remove(struct i2c_client *i2c_client)
+{
+ struct av8100_device *adev;
+
+ adev = dev_to_adev(&i2c_client->dev);
+ if (!adev)
+ return -EFAULT;
+
+ dev_dbg(adev->dev, "%s\n", __func__);
+
+ if (adev->params.inputclk)
+ clk_put(adev->params.inputclk);
+
+ /* Release regulator resource */
+ if (adev->params.regulator_pwr)
+ regulator_put(adev->params.regulator_pwr);
+
+ misc_deregister(&adev->miscdev);
+
+ /* Remove from list */
+ list_del(&adev->list);
+
+ /* Free device data */
+ kfree(adev);
+
+ return 0;
+}
+
+int av8100_init(void)
+{
+ pr_debug("%s\n", __func__);
+
+ if (i2c_add_driver(&av8100_driver)) {
+ pr_err("av8100 i2c_add_driver failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+module_init(av8100_init);
+
+void av8100_exit(void)
+{
+ pr_debug("%s\n", __func__);
+
+ i2c_del_driver(&av8100_driver);
+}
+module_exit(av8100_exit);
+
+MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson HDMI display driver");
diff --git a/drivers/video/av8100/av8100_regs.h b/drivers/video/av8100/av8100_regs.h
new file mode 100644
index 00000000000..6ed9000987a
--- /dev/null
+++ b/drivers/video/av8100/av8100_regs.h
@@ -0,0 +1,346 @@
+
+#define AV8100_VAL2REG(__reg, __fld, __val) \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
+#define AV8100_REG2VAL(__reg, __fld, __val) \
+ (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT)
+
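+/*
+ * Illustrative expansion: AV8100_STANDBY_STBY(1) becomes
+ * ((1) << AV8100_STANDBY_STBY_SHIFT) & AV8100_STANDBY_STBY_MASK == 0x02,
+ * i.e. the STBY field value placed at bit 1 of the STANDBY register.
+ * The _GET() variants extract a field from a read register value.
+ */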
+#define AV8100_STANDBY 0x00000000
+#define AV8100_STANDBY_CPD_SHIFT 0
+#define AV8100_STANDBY_CPD_MASK 0x00000001
+#define AV8100_STANDBY_CPD_HIGH 1
+#define AV8100_STANDBY_CPD_LOW 0
+#define AV8100_STANDBY_CPD(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, CPD, __x)
+#define AV8100_STANDBY_CPD_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, CPD, __x)
+#define AV8100_STANDBY_STBY_SHIFT 1
+#define AV8100_STANDBY_STBY_MASK 0x00000002
+#define AV8100_STANDBY_STBY_HIGH 1
+#define AV8100_STANDBY_STBY_LOW 0
+#define AV8100_STANDBY_STBY(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, STBY, __x)
+#define AV8100_STANDBY_STBY_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, STBY, __x)
+#define AV8100_STANDBY_HPDS_SHIFT 2
+#define AV8100_STANDBY_HPDS_MASK 0x00000004
+#define AV8100_STANDBY_HPDS(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, HPDS, __x)
+#define AV8100_STANDBY_HPDS_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, HPDS, __x)
+#define AV8100_STANDBY_CPDS_SHIFT 3
+#define AV8100_STANDBY_CPDS_MASK 0x00000008
+#define AV8100_STANDBY_CPDS(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, CPDS, __x)
+#define AV8100_STANDBY_CPDS_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, CPDS, __x)
+#define AV8100_STANDBY_MCLKRNG_SHIFT 4
+#define AV8100_STANDBY_MCLKRNG_MASK 0x000000F0
+#define AV8100_STANDBY_MCLKRNG(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY, MCLKRNG, __x)
+#define AV8100_STANDBY_MCLKRNG_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY, MCLKRNG, __x)
+#define AV8100_HDMI_5_VOLT_TIME 0x00000001
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_SHIFT 0
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_MASK 0x0000001F
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_SHIFT 0
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_MASK 0x00000003
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, DAC_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, DAC_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_SHIFT 2
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_MASK 0x0000001C
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, SU_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, SU_OFF_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_SHIFT 5
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_MASK 0x000000E0
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME(__x) \
+ AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, ON_TIME, __x)
+#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_GET(__x) \
+ AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, ON_TIME, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK 0x00000002
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_SHIFT 0
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_MASK 0x00000001
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH 1
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW 0
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, HPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, HPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_SHIFT 1
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_MASK 0x00000002
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_HIGH 1
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_LOW 0
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, CPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, CPDM, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_SHIFT 2
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_MASK 0x0000000C
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT 0x00
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_ALT 0x01
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_OUTPUT0 0x02
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_OUTPUT1 0x03
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, STBYGPIOCFG, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, STBYGPIOCFG, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_SHIFT 7
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_MASK 0x00000080
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_HIGH 1
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW 0
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, IPOL, __x)
+#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, IPOL, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT 0x00000003
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_SHIFT 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_MASK 0x00000001
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, HPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, HPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_SHIFT 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_MASK 0x00000002
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, CPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, CPDI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_SHIFT 2
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_MASK 0x00000004
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, ONI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, ONI, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID_SHIFT 4
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID_MASK 0x000000F0
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, SID, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_SID_GET(__x) \
+ AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, SID, __x)
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_SHIFT 6
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_MASK 0x00000040
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH 1
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW 0
+#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG(__x) \
+ AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, BPDIG, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK 0x00000004
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_SHIFT 0
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_MASK 0x00000001
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, EOCM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, EOCM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_SHIFT 1
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_MASK 0x00000002
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, VSIM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, VSIM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_SHIFT 2
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_MASK 0x00000004
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, VSOM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, VSOM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_SHIFT 3
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_MASK 0x00000008
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, CECM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, CECM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_SHIFT 4
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_MASK 0x00000010
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, HDCPM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, HDCPM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_SHIFT 5
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_MASK 0x00000020
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, UOVBM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, UOVBM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_SHIFT 6
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_MASK 0x00000040
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_HIGH 1
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW 0
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, TEM, __x)
+#define AV8100_GENERAL_INTERRUPT_MASK_TEM_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, TEM, __x)
+#define AV8100_GENERAL_INTERRUPT 0x00000005
+#define AV8100_GENERAL_INTERRUPT_EOCI_SHIFT 0
+#define AV8100_GENERAL_INTERRUPT_EOCI_MASK 0x00000001
+#define AV8100_GENERAL_INTERRUPT_EOCI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, EOCI, __x)
+#define AV8100_GENERAL_INTERRUPT_EOCI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, EOCI, __x)
+#define AV8100_GENERAL_INTERRUPT_VSII_SHIFT 1
+#define AV8100_GENERAL_INTERRUPT_VSII_MASK 0x00000002
+#define AV8100_GENERAL_INTERRUPT_VSII(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, VSII, __x)
+#define AV8100_GENERAL_INTERRUPT_VSII_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, VSII, __x)
+#define AV8100_GENERAL_INTERRUPT_VSOI_SHIFT 2
+#define AV8100_GENERAL_INTERRUPT_VSOI_MASK 0x00000004
+#define AV8100_GENERAL_INTERRUPT_VSOI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, VSOI, __x)
+#define AV8100_GENERAL_INTERRUPT_VSOI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, VSOI, __x)
+#define AV8100_GENERAL_INTERRUPT_CECI_SHIFT 3
+#define AV8100_GENERAL_INTERRUPT_CECI_MASK 0x00000008
+#define AV8100_GENERAL_INTERRUPT_CECI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, CECI, __x)
+#define AV8100_GENERAL_INTERRUPT_CECI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, CECI, __x)
+#define AV8100_GENERAL_INTERRUPT_HDCPI_SHIFT 4
+#define AV8100_GENERAL_INTERRUPT_HDCPI_MASK 0x00000010
+#define AV8100_GENERAL_INTERRUPT_HDCPI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, HDCPI, __x)
+#define AV8100_GENERAL_INTERRUPT_HDCPI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, HDCPI, __x)
+#define AV8100_GENERAL_INTERRUPT_UOVBI_SHIFT 5
+#define AV8100_GENERAL_INTERRUPT_UOVBI_MASK 0x00000020
+#define AV8100_GENERAL_INTERRUPT_UOVBI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, UOVBI, __x)
+#define AV8100_GENERAL_INTERRUPT_UOVBI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, UOVBI, __x)
+#define AV8100_GENERAL_INTERRUPT_TEI_SHIFT 6
+#define AV8100_GENERAL_INTERRUPT_TEI_MASK 0x00000040
+#define AV8100_GENERAL_INTERRUPT_TEI(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, TEI, __x)
+#define AV8100_GENERAL_INTERRUPT_TEI_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, TEI, __x)
+#define AV8100_GENERAL_STATUS 0x00000006
+#define AV8100_GENERAL_STATUS_CECTXERR_SHIFT 0
+#define AV8100_GENERAL_STATUS_CECTXERR_MASK 0x00000001
+#define AV8100_GENERAL_STATUS_CECTXERR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECTXERR, __x)
+#define AV8100_GENERAL_STATUS_CECREC_SHIFT 1
+#define AV8100_GENERAL_STATUS_CECREC_MASK 0x00000002
+#define AV8100_GENERAL_STATUS_CECREC_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECREC, __x)
+#define AV8100_GENERAL_STATUS_CECTRX_SHIFT 2
+#define AV8100_GENERAL_STATUS_CECTRX_MASK 0x00000004
+#define AV8100_GENERAL_STATUS_CECTRX_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECTRX, __x)
+#define AV8100_GENERAL_STATUS_UC_SHIFT 3
+#define AV8100_GENERAL_STATUS_UC_MASK 0x00000008
+#define AV8100_GENERAL_STATUS_UC_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, UC, __x)
+#define AV8100_GENERAL_STATUS_ONUVB_SHIFT 4
+#define AV8100_GENERAL_STATUS_ONUVB_MASK 0x00000010
+#define AV8100_GENERAL_STATUS_ONUVB_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, ONUVB, __x)
+#define AV8100_GENERAL_STATUS_HDCPS_SHIFT 5
+#define AV8100_GENERAL_STATUS_HDCPS_MASK 0x000000E0
+#define AV8100_GENERAL_STATUS_HDCPS_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_STATUS, HDCPS, __x)
+#define AV8100_GPIO_CONFIGURATION 0x00000007
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR_SHIFT 0
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR_MASK 0x00000001
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT3DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT3DIR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT3DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL_SHIFT 1
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL_MASK 0x00000002
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT3VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT3VAL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT3VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR_SHIFT 2
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR_MASK 0x00000004
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT2DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2DIR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT2DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL_SHIFT 3
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL_MASK 0x00000008
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT2VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT2VAL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT2VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR_SHIFT 4
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR_MASK 0x00000010
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT1DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1DIR_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT1DIR, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL_SHIFT 5
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL_MASK 0x00000020
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT1VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_DAT1VAL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT1VAL, __x)
+#define AV8100_GPIO_CONFIGURATION_UCDBG_SHIFT 6
+#define AV8100_GPIO_CONFIGURATION_UCDBG_MASK 0x00000040
+#define AV8100_GPIO_CONFIGURATION_UCDBG(__x) \
+ AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, UCDBG, __x)
+#define AV8100_GPIO_CONFIGURATION_UCDBG_GET(__x) \
+ AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, UCDBG, __x)
+#define AV8100_GENERAL_CONTROL 0x00000008
+#define AV8100_GENERAL_CONTROL_FDL_SHIFT 4
+#define AV8100_GENERAL_CONTROL_FDL_MASK 0x00000010
+#define AV8100_GENERAL_CONTROL_FDL_HIGH 1
+#define AV8100_GENERAL_CONTROL_FDL_LOW 0
+#define AV8100_GENERAL_CONTROL_FDL(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, FDL, __x)
+#define AV8100_GENERAL_CONTROL_FDL_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, FDL, __x)
+#define AV8100_GENERAL_CONTROL_HLD_SHIFT 5
+#define AV8100_GENERAL_CONTROL_HLD_MASK 0x00000020
+#define AV8100_GENERAL_CONTROL_HLD_HIGH 1
+#define AV8100_GENERAL_CONTROL_HLD_LOW 0
+#define AV8100_GENERAL_CONTROL_HLD(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, HLD, __x)
+#define AV8100_GENERAL_CONTROL_HLD_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, HLD, __x)
+#define AV8100_GENERAL_CONTROL_WA_SHIFT 6
+#define AV8100_GENERAL_CONTROL_WA_MASK 0x00000040
+#define AV8100_GENERAL_CONTROL_WA_HIGH 1
+#define AV8100_GENERAL_CONTROL_WA_LOW 0
+#define AV8100_GENERAL_CONTROL_WA(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, WA, __x)
+#define AV8100_GENERAL_CONTROL_WA_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, WA, __x)
+#define AV8100_GENERAL_CONTROL_RA_SHIFT 7
+#define AV8100_GENERAL_CONTROL_RA_MASK 0x00000080
+#define AV8100_GENERAL_CONTROL_RA_HIGH 1
+#define AV8100_GENERAL_CONTROL_RA_LOW 0
+#define AV8100_GENERAL_CONTROL_RA(__x) \
+ AV8100_VAL2REG(AV8100_GENERAL_CONTROL, RA, __x)
+#define AV8100_GENERAL_CONTROL_RA_GET(__x) \
+ AV8100_REG2VAL(AV8100_GENERAL_CONTROL, RA, __x)
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY 0x0000000F
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_SHIFT 0
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_MASK 0x000000FF
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY(__x) \
+ AV8100_VAL2REG(AV8100_FIRMWARE_DOWNLOAD_ENTRY, MBYTE_CODE_ENTRY, __x)
+#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_GET(__x) \
+ AV8100_REG2VAL(AV8100_FIRMWARE_DOWNLOAD_ENTRY, MBYTE_CODE_ENTRY, __x)
diff --git a/drivers/video/av8100/hdmi.c b/drivers/video/av8100/hdmi.c
new file mode 100644
index 00000000000..3159c4446f1
--- /dev/null
+++ b/drivers/video/av8100/hdmi.c
@@ -0,0 +1,2479 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson HDMI driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <video/av8100.h>
+#include <video/hdmi.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/ctype.h>
+#include "hdmi_loc.h"
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+
+#define SYSFS_EVENT_FILENAME "evread"
+#define HDMI_DEVNR_DEFAULT 0
+
+static DEFINE_MUTEX(hdmi_events_mutex);
+#define LOCK_HDMI_EVENTS mutex_lock(&hdmi_events_mutex)
+#define UNLOCK_HDMI_EVENTS mutex_unlock(&hdmi_events_mutex)
+#define EVENTS_MASK 0xFF
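+
+/*
+ * Pending events are kept as a bitmask in struct hdmi_device (events);
+ * only the low eight bits (EVENTS_MASK) are used. Updates to the event
+ * state are guarded by hdmi_events_mutex via the LOCK/UNLOCK_HDMI_EVENTS
+ * helpers above.
+ */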
+
+struct hdmi_device {
+ struct list_head list;
+ struct miscdevice miscdev;
+ struct device *dev;
+ struct hdmi_sysfs_data sysfs_data;
+ int events;
+ int events_mask;
+ wait_queue_head_t event_wq;
+ bool events_received;
+ int devnr;
+};
+
+/* List of devices */
+static LIST_HEAD(hdmi_device_list);
+
+static ssize_t store_storeastext(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_plugdeten(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_edidread(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_edidread(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t store_ceceven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_cecread(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t store_cecsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_infofrsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_hdcpeven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcpchkaesotp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t show_hdcpstateget(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t show_evread(struct device *dev, struct device_attribute *attr,
+ char *buf);
+static ssize_t store_evclr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_audiocfg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_plugstatus(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_poweronoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_poweronoff(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_evwakeup(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static const struct device_attribute hdmi_sysfs_attrs[] = {
+ __ATTR(storeastext, S_IWUSR, NULL, store_storeastext),
+ __ATTR(plugdeten, S_IWUSR, NULL, store_plugdeten),
+ __ATTR(edidread, S_IRUGO | S_IWUSR, show_edidread, store_edidread),
+ __ATTR(ceceven, S_IWUSR, NULL, store_ceceven),
+ __ATTR(cecread, S_IRUGO, show_cecread, NULL),
+ __ATTR(cecsend, S_IWUSR, NULL, store_cecsend),
+ __ATTR(infofrsend, S_IWUSR, NULL, store_infofrsend),
+ __ATTR(hdcpeven, S_IWUSR, NULL, store_hdcpeven),
+ __ATTR(hdcpchkaesotp, S_IRUGO, show_hdcpchkaesotp, NULL),
+ __ATTR(hdcpfuseaes, S_IRUGO | S_IWUSR, show_hdcpfuseaes,
+ store_hdcpfuseaes),
+ __ATTR(hdcploadaes, S_IRUGO | S_IWUSR, show_hdcploadaes,
+ store_hdcploadaes),
+ __ATTR(hdcpauthencr, S_IRUGO | S_IWUSR, show_hdcpauthencr,
+ store_hdcpauthencr),
+ __ATTR(hdcpstateget, S_IRUGO, show_hdcpstateget, NULL),
+ __ATTR(evread, S_IRUGO, show_evread, NULL),
+ __ATTR(evclr, S_IWUSR, NULL, store_evclr),
+ __ATTR(audiocfg, S_IWUSR, NULL, store_audiocfg),
+ __ATTR(plugstatus, S_IRUGO, show_plugstatus, NULL),
+ __ATTR(poweronoff, S_IRUGO | S_IWUSR, show_poweronoff,
+ store_poweronoff),
+ __ATTR(evwakeup, S_IWUSR, NULL, store_evwakeup),
+ __ATTR_NULL
+};
+
+/* Convert two hexadecimal characters to their integer value */
+static unsigned int htoi(const char *ptr)
+{
+ unsigned int value = 0;
+ char ch;
+
+ if (!ptr)
+ return 0;
+
+ ch = *ptr;
+ if (isdigit(ch))
+ value = ch - '0';
+ else
+ value = toupper(ch) - 'A' + 10;
+
+ value <<= 4;
+ ch = *(++ptr);
+
+ if (isdigit(ch))
+ value += ch - '0';
+ else
+ value += toupper(ch) - 'A' + 10;
+
+ return value;
+}
+
+static struct hdmi_device *dev_to_hdev(struct device *dev)
+{
+ /* Get device from list of devices */
+ struct list_head *element;
+ struct hdmi_device *hdmi_dev;
+
+ list_for_each(element, &hdmi_device_list) {
+ hdmi_dev = list_entry(element, struct hdmi_device, list);
+ if (hdmi_dev->dev == dev)
+ return hdmi_dev;
+ }
+
+ return NULL;
+}
+
+static struct hdmi_device *devnr_to_hdev(int devnr)
+{
+ /* Get device from list of devices */
+ struct list_head *element;
+ struct hdmi_device *hdmi_dev;
+ int cnt = 0;
+
+ list_for_each(element, &hdmi_device_list) {
+ hdmi_dev = list_entry(element, struct hdmi_device, list);
+ if (cnt == devnr)
+ return hdmi_dev;
+ cnt++;
+ }
+
+ return NULL;
+}
+
+static int event_enable(struct hdmi_device *hdev, bool enable,
+ enum hdmi_event ev)
+{
+ struct kobject *kobj = &hdev->dev->kobj;
+
+ dev_dbg(hdev->dev, "enable_event %d %02x\n", enable, ev);
+ if (enable)
+ hdev->events_mask |= ev;
+ else
+ hdev->events_mask &= ~ev;
+
+ if (hdev->events & ev) {
+ /* Report pending event */
+ /* Wake up application waiting for event via call to poll() */
+ sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events_received = true;
+ UNLOCK_HDMI_EVENTS;
+
+ wake_up_interruptible(&hdev->event_wq);
+ }
+
+ return 0;
+}
+
+static int plugdeten(struct hdmi_device *hdev, struct plug_detect *pldet)
+{
+ struct av8100_status status;
+ u8 denc_off_time = 0;
+ int retval;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ event_enable(hdev, pldet->hdmi_detect_enable != 0,
+ HDMI_EVENT_HDMI_PLUGIN);
+ event_enable(hdev, pldet->hdmi_detect_enable != 0,
+ HDMI_EVENT_HDMI_PLUGOUT);
+
+ av8100_reg_hdmi_5_volt_time_r(&denc_off_time, NULL, NULL);
+
+ retval = av8100_reg_hdmi_5_volt_time_w(
+ denc_off_time,
+ pldet->hdmi_off_time,
+ pldet->on_time);
+
+ if (retval) {
+ dev_err(hdev->dev, "Failed to write the value to av8100 "
+ "register\n");
+ return -EFAULT;
+ }
+
+ return retval;
+}
+
+static int edidread(struct hdmi_device *hdev, struct edid_read *edidread,
+ u8 *len, u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.edid_section_readback_format.address = edidread->address;
+ config.edid_section_readback_format.block_number = edidread->block_nr;
+
+ dev_dbg(hdev->dev, "addr:%0x blnr:%0x",
+ config.edid_section_readback_format.address,
+ config.edid_section_readback_format.block_number);
+
+ if (av8100_conf_prep(AV8100_COMMAND_EDID_SECTION_READBACK,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_EDID_SECTION_READBACK,
+ len, data, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(hdev->dev, "len:%0x\n", *len);
+
+ return 0;
+}
+
+static int cecread(struct hdmi_device *hdev, u8 *src, u8 *dest, u8 *data_len,
+ u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buff[HDMI_CEC_READ_MAXSIZE];
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ if (av8100_conf_prep(AV8100_COMMAND_CEC_MESSAGE_READ_BACK,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_CEC_MESSAGE_READ_BACK,
+ &buf_len, buff, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if (buf_len > 0) {
+ *src = (buff[0] & 0xF0) >> 4;
+ *dest = buff[0] & 0x0F;
+ *data_len = buf_len - 1;
+ memcpy(data, &buff[1], buf_len - 1);
+ } else
+ *data_len = 0;
+
+ return 0;
+}
+
+/* CEC tx status can be set or read */
+static bool cec_tx_status(struct hdmi_device *hdev,
+ enum cec_tx_status_action action)
+{
+ static bool cec_tx_busy;
+
+ switch (action) {
+ case CEC_TX_SET_FREE:
+ cec_tx_busy = false;
+ dev_dbg(hdev->dev, "cec_tx_busy set:%d\n", cec_tx_busy);
+ break;
+
+ case CEC_TX_SET_BUSY:
+ cec_tx_busy = true;
+ dev_dbg(hdev->dev, "cec_tx_busy set:%d\n", cec_tx_busy);
+ break;
+
+ case CEC_TX_CHECK:
+ default:
+ dev_dbg(hdev->dev, "cec_tx_busy chk:%d\n", cec_tx_busy);
+ break;
+ }
+
+ return cec_tx_busy;
+}
+
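+/*
+ * The CEC transmitter is marked busy after each message write and is
+ * released again from hdmi_event() when the chip reports CECTX,
+ * CECTXERR or a plugout, so cecsend() first waits (up to CECTX_TRY
+ * polls) for any pending transmission to finish.
+ */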
+static int cecsend(struct hdmi_device *hdev, u8 src, u8 dest, u8 data_len,
+ u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ int cnt;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.cec_message_write_format.buffer[0] = ((src & 0x0F) << 4) +
+ (dest & 0x0F);
+ config.cec_message_write_format.buffer_length = data_len + 1;
+ memcpy(&config.cec_message_write_format.buffer[1], data, data_len);
+
+ if (av8100_conf_prep(AV8100_COMMAND_CEC_MESSAGE_WRITE,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_enable_interrupt() != 0) {
+ dev_err(hdev->dev, "av8100_ei FAIL\n");
+ return -EINVAL;
+ }
+
+ cnt = 0;
+ while ((cnt < CECTX_TRY) && cec_tx_status(hdev, CEC_TX_CHECK)) {
+ /* Wait for pending CEC to be finished */
+ msleep(CECTX_WAITTIME);
+ cnt++;
+ }
+ dev_dbg(hdev->dev, "cectxcnt:%d\n", cnt);
+
+ if (av8100_conf_w(AV8100_COMMAND_CEC_MESSAGE_WRITE,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+ cec_tx_status(hdev, CEC_TX_SET_BUSY);
+
+ return 0;
+}
+
+static int infofrsend(struct hdmi_device *hdev, u8 type, u8 version, u8 crc,
+ u8 data_len, u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((data_len < 1) || (data_len > HDMI_INFOFRAME_MAX_SIZE))
+ return -EINVAL;
+
+ config.infoframes_format.type = type;
+ config.infoframes_format.version = version;
+ config.infoframes_format.crc = crc;
+ config.infoframes_format.length = data_len;
+ memcpy(&config.infoframes_format.data, data, data_len);
+ if (av8100_conf_prep(AV8100_COMMAND_INFOFRAMES,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_INFOFRAMES,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hdcpchkaesotp(struct hdmi_device *hdev, u8 *crc, u8 *progged)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buf[2];
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.fuse_aes_key_format.fuse_operation = AV8100_FUSE_READ;
+ memset(config.fuse_aes_key_format.key, 0, AV8100_FUSE_KEY_SIZE);
+ if (av8100_conf_prep(AV8100_COMMAND_FUSE_AES_KEY,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_FUSE_AES_KEY,
+ &buf_len, buf, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if (buf_len == 2) {
+ *crc = buf[0];
+ *progged = buf[1];
+ }
+
+ return 0;
+}
+
+static int hdcpfuseaes(struct hdmi_device *hdev, u8 *key, u8 crc, u8 *result)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buf[2];
+
+ /* Default not OK */
+ *result = HDMI_RESULT_NOT_OK;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.fuse_aes_key_format.fuse_operation = AV8100_FUSE_WRITE;
+ memcpy(config.fuse_aes_key_format.key, key, AV8100_FUSE_KEY_SIZE);
+ if (av8100_conf_prep(AV8100_COMMAND_FUSE_AES_KEY,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_FUSE_AES_KEY,
+ &buf_len, buf, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if (buf_len == 2) {
+ dev_dbg(hdev->dev, "buf[0]:%02x buf[1]:%02x\n", buf[0], buf[1]);
+ if ((crc == buf[0]) && (buf[1] == 1))
+ /* OK */
+ *result = HDMI_RESULT_OK;
+ else
+ *result = HDMI_RESULT_CRC_MISMATCH;
+ }
+
+ return 0;
+}
+
+static int hdcploadaes(struct hdmi_device *hdev, u8 block, u8 key_len, u8 *key,
+ u8 *result, u8 *crc32)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+ u8 buf_len;
+ u8 buf[CRC32_SIZE];
+
+ /* Default not OK */
+ *result = HDMI_RESULT_NOT_OK;
+
+ dev_dbg(hdev->dev, "%s block:%d\n", __func__, block);
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.hdcp_send_key_format.key_number = block;
+ config.hdcp_send_key_format.data_len = key_len;
+ memcpy(config.hdcp_send_key_format.data, key, key_len);
+ if (av8100_conf_prep(AV8100_COMMAND_HDCP_SENDKEY, &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_HDCP_SENDKEY,
+ &buf_len, buf, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ if ((buf_len == CRC32_SIZE) && (crc32)) {
+ memcpy(crc32, buf, CRC32_SIZE);
+ dev_dbg(hdev->dev, "crc32:%02x%02x%02x%02x\n",
+ crc32[0], crc32[1], crc32[2], crc32[3]);
+ }
+
+ *result = HDMI_RESULT_OK;
+
+ return 0;
+}
+
+static int hdcpauthencr(struct hdmi_device *hdev, u8 auth_type, u8 encr_type,
+ u8 *len, u8 *data)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ switch (auth_type) {
+ case HDMI_HDCP_AUTH_OFF:
+ default:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_AUTH_REQ_OFF;
+ break;
+
+ case HDMI_HDCP_AUTH_START:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_AUTH_REQ_ON;
+ break;
+
+ case HDMI_HDCP_AUTH_REV_LIST_REQ:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_REV_LIST_REQ;
+ break;
+ case HDMI_HDCP_AUTH_CONT:
+ config.hdcp_management_format.req_type =
+ AV8100_HDCP_AUTH_CONT;
+ break;
+ }
+
+ switch (encr_type) {
+ case HDMI_HDCP_ENCR_OESS:
+ default:
+ config.hdcp_management_format.encr_use =
+ AV8100_HDCP_ENCR_USE_OESS;
+ break;
+
+ case HDMI_HDCP_ENCR_EESS:
+ config.hdcp_management_format.encr_use =
+ AV8100_HDCP_ENCR_USE_EESS;
+ break;
+ }
+
+ if (av8100_conf_prep(AV8100_COMMAND_HDCP_MANAGEMENT,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_HDCP_MANAGEMENT,
+ len, data, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 events_read(struct hdmi_device *hdev)
+{
+ int ret;
+
+ LOCK_HDMI_EVENTS;
+ ret = hdev->events;
+ dev_dbg(hdev->dev, "%s %02x\n", __func__, hdev->events);
+ UNLOCK_HDMI_EVENTS;
+
+ return ret;
+}
+
+static int events_clear(struct hdmi_device *hdev, u8 ev)
+{
+ dev_dbg(hdev->dev, "%s %02x\n", __func__, ev);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events &= ~ev & EVENTS_MASK;
+ UNLOCK_HDMI_EVENTS;
+
+ return 0;
+}
+
+static int event_wakeup(struct hdmi_device *hdev)
+{
+ struct kobject *kobj = &hdev->dev->kobj;
+
+ dev_dbg(hdev->dev, "%s", __func__);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events |= HDMI_EVENT_WAKEUP;
+ hdev->events_received = true;
+ UNLOCK_HDMI_EVENTS;
+
+ /* Wake up application waiting for event via call to poll() */
+ sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME);
+ wake_up_interruptible(&hdev->event_wq);
+
+ return 0;
+}
+
+static int audiocfg(struct hdmi_device *hdev, struct audio_cfg *cfg)
+{
+ union av8100_configuration config;
+ struct av8100_status status;
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_INIT) {
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ config.audio_input_format.audio_input_if_format = cfg->if_format;
+ config.audio_input_format.i2s_input_nb = cfg->i2s_entries;
+ config.audio_input_format.sample_audio_freq = cfg->freq;
+ config.audio_input_format.audio_word_lg = cfg->word_length;
+ config.audio_input_format.audio_format = cfg->format;
+ config.audio_input_format.audio_if_mode = cfg->if_mode;
+ config.audio_input_format.audio_mute = cfg->mute;
+
+ if (av8100_conf_prep(AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* sysfs */
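+/*
+ * Every attribute below accepts and returns data either as raw binary
+ * or as hex text, selected at runtime through the storeastext attribute
+ * (sysfs_data.store_as_hextext). In text mode each byte is encoded as
+ * two hex characters; for example, writing "010203" to plugdeten sets
+ * hdmi_detect_enable=0x01, on_time=0x02 and hdmi_off_time=0x03.
+ */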
+static ssize_t store_storeastext(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if ((count != HDMI_STOREASTEXT_BIN_SIZE) &&
+ (count != HDMI_STOREASTEXT_TEXT_SIZE) &&
+ (count != HDMI_STOREASTEXT_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ if ((count == HDMI_STOREASTEXT_BIN_SIZE) && (*buf == 0x1))
+ hdev->sysfs_data.store_as_hextext = true;
+ else if (((count == HDMI_STOREASTEXT_TEXT_SIZE) ||
+ (count == HDMI_STOREASTEXT_TEXT_SIZE + 1)) && (*buf == '0') &&
+ (*(buf + 1) == '1')) {
+ hdev->sysfs_data.store_as_hextext = true;
+ } else {
+ hdev->sysfs_data.store_as_hextext = false;
+ }
+
+ dev_dbg(hdev->dev, "store_as_hextext:%0d\n",
+ hdev->sysfs_data.store_as_hextext);
+
+ return count;
+}
+
+static ssize_t store_plugdeten(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct plug_detect plug_detect;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_PLUGDETEN_TEXT_SIZE) &&
+ (count != HDMI_PLUGDETEN_TEXT_SIZE + 1))
+ return -EINVAL;
+ plug_detect.hdmi_detect_enable = htoi(buf + index);
+ index += 2;
+ plug_detect.on_time = htoi(buf + index);
+ index += 2;
+ plug_detect.hdmi_off_time = htoi(buf + index);
+ index += 2;
+ } else {
+ if (count != HDMI_PLUGDETEN_BIN_SIZE)
+ return -EINVAL;
+ plug_detect.hdmi_detect_enable = *(buf + index++);
+ plug_detect.on_time = *(buf + index++);
+ plug_detect.hdmi_off_time = *(buf + index++);
+ }
+
+ if (plugdeten(hdev, &plug_detect))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t store_edidread(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct edid_read edid_read;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+ dev_dbg(hdev->dev, "count:%d\n", count);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_EDIDREAD_TEXT_SIZE) &&
+ (count != HDMI_EDIDREAD_TEXT_SIZE + 1))
+ return -EINVAL;
+ edid_read.address = htoi(buf + index);
+ index += 2;
+ edid_read.block_nr = htoi(buf + index);
+ index += 2;
+ } else {
+ if (count != HDMI_EDIDREAD_BIN_SIZE)
+ return -EINVAL;
+ edid_read.address = *(buf + index++);
+ edid_read.block_nr = *(buf + index++);
+ }
+
+ if (edidread(hdev, &edid_read, &hdev->sysfs_data.edid_data.buf_len,
+ hdev->sysfs_data.edid_data.buf))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t show_edidread(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int len;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ len = hdev->sysfs_data.edid_data.buf_len;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", len);
+ index += 2;
+ } else
+ *(buf + index++) = len;
+
+ dev_dbg(hdev->dev, "len:%02x\n", len);
+
+ cnt = 0;
+ while (cnt < len) {
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.edid_data.buf[cnt]);
+ index += 2;
+ } else
+ *(buf + index++) =
+ hdev->sysfs_data.edid_data.buf[cnt];
+
+ dev_dbg(hdev->dev, "%02x ",
+ hdev->sysfs_data.edid_data.buf[cnt]);
+
+ cnt++;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_ceceven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ bool enable = false;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_CECEVEN_TEXT_SIZE) &&
+ (count != HDMI_CECEVEN_TEXT_SIZE + 1))
+ return -EINVAL;
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ } else {
+ if (count != HDMI_CECEVEN_BIN_SIZE)
+ return -EINVAL;
+ if (*buf == 0x01)
+ enable = true;
+ }
+
+ event_enable(hdev, enable, HDMI_EVENT_CEC | HDMI_EVENT_CECTXERR |
+ HDMI_EVENT_CECTX);
+
+ return count;
+}
+
+static ssize_t show_cecread(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct cec_rw cec_read;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (cecread(hdev, &cec_read.src, &cec_read.dest, &cec_read.length,
+ cec_read.data))
+ return -EINVAL;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", cec_read.src);
+ index += 2;
+ snprintf(buf + index, 3, "%02x", cec_read.dest);
+ index += 2;
+ snprintf(buf + index, 3, "%02x", cec_read.length);
+ index += 2;
+ } else {
+ *(buf + index++) = cec_read.src;
+ *(buf + index++) = cec_read.dest;
+ *(buf + index++) = cec_read.length;
+ }
+
+ dev_dbg(hdev->dev, "len:%02x\n", cec_read.length);
+
+ cnt = 0;
+ while (cnt < cec_read.length) {
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", cec_read.data[cnt]);
+ index += 2;
+ } else
+ *(buf + index++) = cec_read.data[cnt];
+
+ dev_dbg(hdev->dev, "%02x ", cec_read.data[cnt]);
+
+ cnt++;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_cecsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct cec_rw cec_w;
+ int index = 0;
+ int cnt;
+ int store_as_text;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if ((*buf == 'F') || (*buf == 'f'))
+ /* To be able to override bin format for test purpose */
+ store_as_text = 1;
+ else
+ store_as_text = hdev->sysfs_data.store_as_hextext;
+
+ if (store_as_text) {
+ if ((count < HDMI_CECSEND_TEXT_SIZE_MIN) ||
+ (count > HDMI_CECSEND_TEXT_SIZE_MAX))
+ return -EINVAL;
+
+ cec_w.src = htoi(buf + index) & 0x0F;
+ index += 2;
+ cec_w.dest = htoi(buf + index);
+ index += 2;
+ cec_w.length = htoi(buf + index);
+ index += 2;
+ if (cec_w.length > HDMI_CEC_WRITE_MAXSIZE)
+ return -EINVAL;
+ cnt = 0;
+ while (cnt < cec_w.length) {
+ cec_w.data[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", cec_w.data[cnt]);
+ cnt++;
+ }
+ } else {
+ if ((count < HDMI_CECSEND_BIN_SIZE_MIN) ||
+ (count > HDMI_CECSEND_BIN_SIZE_MAX))
+ return -EINVAL;
+
+ cec_w.src = *(buf + index++);
+ cec_w.dest = *(buf + index++);
+ cec_w.length = *(buf + index++);
+ if (cec_w.length > HDMI_CEC_WRITE_MAXSIZE)
+ return -EINVAL;
+ memcpy(cec_w.data, buf + index, cec_w.length);
+ }
+
+ if (cecsend(hdev, cec_w.src, cec_w.dest, cec_w.length, cec_w.data))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t store_infofrsend(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct info_fr info_fr;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count < HDMI_INFOFRSEND_TEXT_SIZE_MIN) ||
+ (count > HDMI_INFOFRSEND_TEXT_SIZE_MAX))
+ return -EINVAL;
+
+ info_fr.type = htoi(&buf[index]);
+ index += 2;
+ info_fr.ver = htoi(&buf[index]);
+ index += 2;
+ info_fr.crc = htoi(&buf[index]);
+ index += 2;
+ info_fr.length = htoi(&buf[index]);
+ index += 2;
+
+ if (info_fr.length > HDMI_INFOFRAME_MAX_SIZE)
+ return -EINVAL;
+ cnt = 0;
+ while (cnt < info_fr.length) {
+ info_fr.data[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", info_fr.data[cnt]);
+ cnt++;
+ }
+ } else {
+ if ((count < HDMI_INFOFRSEND_BIN_SIZE_MIN) ||
+ (count > HDMI_INFOFRSEND_BIN_SIZE_MAX))
+ return -EINVAL;
+
+ info_fr.type = *(buf + index++);
+ info_fr.ver = *(buf + index++);
+ info_fr.crc = *(buf + index++);
+ info_fr.length = *(buf + index++);
+
+ if (info_fr.length > HDMI_INFOFRAME_MAX_SIZE)
+ return -EINVAL;
+ memcpy(info_fr.data, buf + index, info_fr.length);
+ }
+
+ if (infofrsend(hdev, info_fr.type, info_fr.ver, info_fr.crc,
+ info_fr.length, info_fr.data))
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t store_hdcpeven(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ bool enable = false;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCPEVEN_TEXT_SIZE) &&
+ (count != HDMI_HDCPEVEN_TEXT_SIZE + 1))
+ return -EINVAL;
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ } else {
+ if (count != HDMI_HDCPEVEN_BIN_SIZE)
+ return -EINVAL;
+ if (*buf == 0x01)
+ enable = true;
+ }
+
+ event_enable(hdev, enable, HDMI_EVENT_HDCP);
+
+ return count;
+}
+
+static ssize_t show_hdcpchkaesotp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ u8 crc;
+ u8 progged;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdcpchkaesotp(hdev, &crc, &progged))
+ return -EINVAL;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", progged);
+ index += 2;
+ } else {
+ *(buf + index++) = progged;
+ }
+
+ dev_dbg(hdev->dev, "progged:%02x\n", progged);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct hdcp_fuseaes hdcp_fuseaes;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* Default not OK */
+ hdev->sysfs_data.fuse_result = HDMI_RESULT_NOT_OK;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCP_FUSEAES_TEXT_SIZE) &&
+ (count != HDMI_HDCP_FUSEAES_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ cnt = 0;
+ while (cnt < HDMI_HDCP_FUSEAES_KEYSIZE) {
+ hdcp_fuseaes.key[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", hdcp_fuseaes.key[cnt]);
+ cnt++;
+ }
+ hdcp_fuseaes.crc = htoi(&buf[index]);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", hdcp_fuseaes.crc);
+ } else {
+ if (count != HDMI_HDCP_FUSEAES_BIN_SIZE)
+ return -EINVAL;
+
+ memcpy(hdcp_fuseaes.key, buf + index,
+ HDMI_HDCP_FUSEAES_KEYSIZE);
+ index += HDMI_HDCP_FUSEAES_KEYSIZE;
+ hdcp_fuseaes.crc = *(buf + index++);
+ }
+
+ if (hdcpfuseaes(hdev, hdcp_fuseaes.key, hdcp_fuseaes.crc,
+ &hdcp_fuseaes.result))
+ return -EINVAL;
+
+ dev_dbg(hdev->dev, "fuseresult:%02x ", hdcp_fuseaes.result);
+
+ hdev->sysfs_data.fuse_result = hdcp_fuseaes.result;
+
+ return count;
+}
+
+static ssize_t show_hdcpfuseaes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", hdev->sysfs_data.fuse_result);
+ index += 2;
+ } else
+ *(buf + index++) = hdev->sysfs_data.fuse_result;
+
+ dev_dbg(hdev->dev, "status:%02x\n", hdev->sysfs_data.fuse_result);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct hdcp_loadaesone hdcp_loadaes;
+ int index = 0;
+ int block_cnt;
+ int cnt;
+ u8 crc32_rcvd[CRC32_SIZE];
+ u8 crc;
+ u8 progged;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* Default not OK */
+ hdev->sysfs_data.loadaes_result = HDMI_RESULT_NOT_OK;
+
+ if (hdcpchkaesotp(hdev, &crc, &progged))
+ return -EINVAL;
+
+ if (!progged) {
+ /* AES is not fused */
+ hdcp_loadaes.result = HDMI_AES_NOT_FUSED;
+ goto store_hdcploadaes_err;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCP_LOADAES_TEXT_SIZE) &&
+ (count != HDMI_HDCP_LOADAES_TEXT_SIZE + 1)) {
+ dev_err(hdev->dev, "%s", "count mismatch\n");
+ return -EINVAL;
+ }
+
+ /* AES */
+ block_cnt = 0;
+ while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) {
+ cnt = 0;
+ while (cnt < HDMI_HDCP_AES_KEYSIZE) {
+ hdcp_loadaes.key[cnt] = htoi(buf + index);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ",
+ hdcp_loadaes.key[cnt]);
+ cnt++;
+ }
+
+ if (hdcploadaes(hdev,
+ block_cnt + HDMI_HDCP_AES_BLOCK_START,
+ HDMI_HDCP_AES_KEYSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ crc32_rcvd)) {
+ dev_err(hdev->dev, "%s %d\n",
+ "hdcploadaes err aes block",
+ block_cnt + HDMI_HDCP_AES_BLOCK_START);
+ return -EINVAL;
+ }
+
+ if (hdcp_loadaes.result)
+ goto store_hdcploadaes_err;
+
+ block_cnt++;
+ }
+
+ /* KSV */
+ memset(hdcp_loadaes.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE);
+ cnt = HDMI_HDCP_AES_KSVZEROESSIZE;
+ while (cnt < HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE) {
+ hdcp_loadaes.key[cnt] =
+ htoi(&buf[index]);
+ index += 2;
+ dev_dbg(hdev->dev, "%02x ", hdcp_loadaes.key[cnt]);
+ cnt++;
+ }
+
+ if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK,
+ HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ NULL)) {
+ dev_err(hdev->dev,
+ "hdcploadaes err in ksv block %d\n",
+ HDMI_HDCP_KSV_BLOCK);
+ return -EINVAL;
+ }
+
+ if (hdcp_loadaes.result)
+ goto store_hdcploadaes_err;
+
+ /* CRC32 */
+ for (cnt = 0; cnt < CRC32_SIZE; cnt++) {
+ hdcp_loadaes.crc32[cnt] = htoi(buf + index);
+ index += 2;
+ }
+
+ if (memcmp(hdcp_loadaes.crc32, crc32_rcvd, CRC32_SIZE)) {
+ dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n",
+ hdcp_loadaes.crc32[0],
+ hdcp_loadaes.crc32[1],
+ hdcp_loadaes.crc32[2],
+ hdcp_loadaes.crc32[3]);
+ hdcp_loadaes.result = HDMI_RESULT_CRC_MISMATCH;
+ goto store_hdcploadaes_err;
+ }
+ } else {
+ if (count != HDMI_HDCP_LOADAES_BIN_SIZE) {
+ dev_err(hdev->dev, "%s", "count mismatch\n");
+ return -EINVAL;
+ }
+
+ /* AES */
+ block_cnt = 0;
+ while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) {
+ memcpy(hdcp_loadaes.key, buf + index,
+ HDMI_HDCP_AES_KEYSIZE);
+ index += HDMI_HDCP_AES_KEYSIZE;
+
+ if (hdcploadaes(hdev,
+ block_cnt + HDMI_HDCP_AES_BLOCK_START,
+ HDMI_HDCP_AES_KEYSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ crc32_rcvd)) {
+ dev_err(hdev->dev, "%s %d\n",
+ "hdcploadaes err aes block",
+ block_cnt + HDMI_HDCP_AES_BLOCK_START);
+ return -EINVAL;
+ }
+
+ if (hdcp_loadaes.result)
+ goto store_hdcploadaes_err;
+
+ block_cnt++;
+ }
+
+ /* KSV */
+ memset(hdcp_loadaes.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE);
+ memcpy(hdcp_loadaes.key + HDMI_HDCP_AES_KSVZEROESSIZE,
+ buf + index,
+ HDMI_HDCP_AES_KSVSIZE);
+ index += HDMI_HDCP_AES_KSVSIZE;
+
+ if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK,
+ HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaes.key,
+ &hdcp_loadaes.result,
+ NULL)) {
+ dev_err(hdev->dev, "%s %d\n",
+ "hdcploadaes err in ksv\n",
+ block_cnt + HDMI_HDCP_AES_BLOCK_START);
+ return -EINVAL;
+ }
+
+ memcpy(hdcp_loadaes.crc32, buf + index, CRC32_SIZE);
+ index += CRC32_SIZE;
+
+ /* CRC32 */
+ if (memcmp(hdcp_loadaes.crc32, crc32_rcvd, CRC32_SIZE)) {
+ dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n",
+ hdcp_loadaes.crc32[0],
+ hdcp_loadaes.crc32[1],
+ hdcp_loadaes.crc32[2],
+ hdcp_loadaes.crc32[3]);
+ hdcp_loadaes.result = HDMI_RESULT_CRC_MISMATCH;
+ }
+ }
+
+store_hdcploadaes_err:
+ hdev->sysfs_data.loadaes_result = hdcp_loadaes.result;
+ return count;
+}
+
+static ssize_t show_hdcploadaes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.loadaes_result);
+ index += 2;
+ } else
+ *(buf + index++) = hdev->sysfs_data.loadaes_result;
+
+ dev_dbg(hdev->dev, "result:%02x\n", hdev->sysfs_data.loadaes_result);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct hdcp_authencr hdcp_authencr;
+ int index = 0;
+ u8 crc;
+ u8 progged;
+ int result = HDMI_RESULT_NOT_OK;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* Default */
+ hdev->sysfs_data.authencr.buf_len = 0;
+
+ if (hdcpchkaesotp(hdev, &crc, &progged)) {
+ result = HDMI_AES_NOT_FUSED;
+ goto store_hdcpauthencr_end;
+ }
+
+ if (!progged) {
+ /* AES is not fused */
+ result = HDMI_AES_NOT_FUSED;
+ goto store_hdcpauthencr_end;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_HDCPAUTHENCR_TEXT_SIZE) &&
+ (count != HDMI_HDCPAUTHENCR_TEXT_SIZE + 1))
+ goto store_hdcpauthencr_end;
+
+ hdcp_authencr.auth_type = htoi(buf + index);
+ index += 2;
+ hdcp_authencr.encr_type = htoi(buf + index);
+ index += 2;
+ } else {
+ if (count != HDMI_HDCPAUTHENCR_BIN_SIZE)
+ goto store_hdcpauthencr_end;
+
+ hdcp_authencr.auth_type = *(buf + index++);
+ hdcp_authencr.encr_type = *(buf + index++);
+ }
+
+ if (hdcpauthencr(hdev, hdcp_authencr.auth_type, hdcp_authencr.encr_type,
+ &hdev->sysfs_data.authencr.buf_len,
+ hdev->sysfs_data.authencr.buf))
+ goto store_hdcpauthencr_end;
+
+ result = HDMI_RESULT_OK;
+
+store_hdcpauthencr_end:
+ hdev->sysfs_data.authencr.result = result;
+ return count;
+}
+
+static ssize_t show_hdcpauthencr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int len;
+ int index = 0;
+ int cnt;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ /* result */
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.authencr.result);
+ index += 2;
+ } else
+ *(buf + index++) = hdev->sysfs_data.authencr.result;
+
+ dev_dbg(hdev->dev, "result:%02x\n", hdev->sysfs_data.authencr.result);
+
+ /* resp_size */
+ len = hdev->sysfs_data.authencr.buf_len;
+ if (len > AUTH_BUF_LEN)
+ len = AUTH_BUF_LEN;
+ dev_dbg(hdev->dev, "resp_size:%d\n", len);
+
+ /* resp */
+ cnt = 0;
+ while (cnt < len) {
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x",
+ hdev->sysfs_data.authencr.buf[cnt]);
+ index += 2;
+
+ dev_dbg(hdev->dev, "%02x ",
+ hdev->sysfs_data.authencr.buf[cnt]);
+
+ } else
+ *(buf + index++) = hdev->sysfs_data.authencr.buf[cnt];
+
+ cnt++;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t show_hdcpstateget(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ u8 hdcp_state;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (av8100_reg_gen_status_r(NULL, NULL, NULL, NULL, NULL, &hdcp_state))
+ return -EINVAL;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", hdcp_state);
+ index += 2;
+ } else
+ *(buf + index++) = hdcp_state;
+
+ dev_dbg(hdev->dev, "status:%02x\n", hdcp_state);
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t show_evread(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+ u8 ev;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ ev = events_read(hdev);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", ev);
+ index += 2;
+ } else
+ *(buf + index++) = ev;
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ /* Events are read: clear events */
+ events_clear(hdev, EVENTS_MASK);
+
+ return index;
+}
+
+static ssize_t store_evclr(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ u8 ev;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_EVCLR_TEXT_SIZE) &&
+ (count != HDMI_EVCLR_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ ev = htoi(&buf[index]);
+ index += 2;
+ } else {
+ if (count != HDMI_EVCLR_BIN_SIZE)
+ return -EINVAL;
+
+ ev = *(buf + index++);
+ }
+
+ events_clear(hdev, ev);
+
+ return count;
+}
+
+static ssize_t store_audiocfg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ struct audio_cfg audio_cfg;
+ int index = 0;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_AUDIOCFG_TEXT_SIZE) &&
+ (count != HDMI_AUDIOCFG_TEXT_SIZE + 1))
+ return -EINVAL;
+
+ audio_cfg.if_format = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.i2s_entries = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.freq = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.word_length = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.format = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.if_mode = htoi(&buf[index]);
+ index += 2;
+ audio_cfg.mute = htoi(&buf[index]);
+ index += 2;
+ } else {
+ if (count != HDMI_AUDIOCFG_BIN_SIZE)
+ return -EINVAL;
+
+ audio_cfg.if_format = *(buf + index++);
+ audio_cfg.i2s_entries = *(buf + index++);
+ audio_cfg.freq = *(buf + index++);
+ audio_cfg.word_length = *(buf + index++);
+ audio_cfg.format = *(buf + index++);
+ audio_cfg.if_mode = *(buf + index++);
+ audio_cfg.mute = *(buf + index++);
+ }
+
+ audiocfg(hdev, &audio_cfg);
+
+ return count;
+}
+
+static ssize_t show_plugstatus(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+ struct av8100_status av8100_status;
+ u8 plstat;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ av8100_status = av8100_status_get();
+ plstat = av8100_status.av8100_plugin_status == AV8100_HDMI_PLUGIN;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", plstat);
+ index += 2;
+ } else
+ *(buf + index++) = plstat;
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_poweronoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ bool enable = false;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ if ((count != HDMI_POWERONOFF_TEXT_SIZE) &&
+ (count != HDMI_POWERONOFF_TEXT_SIZE + 1))
+ return -EINVAL;
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ } else {
+ if (count != HDMI_POWERONOFF_BIN_SIZE)
+ return -EINVAL;
+ if (*buf == 0x01)
+ enable = true;
+ }
+
+ if (enable == 0) {
+ if (av8100_powerdown() != 0) {
+ dev_err(hdev->dev, "av8100_powerdown FAIL\n");
+ return -EINVAL;
+ }
+ } else {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup FAIL\n");
+ return -EINVAL;
+ }
+ }
+
+ return count;
+}
+
+static ssize_t show_poweronoff(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+ int index = 0;
+ struct av8100_status status;
+ u8 power_state;
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_SCAN)
+ power_state = 0;
+ else
+ power_state = 1;
+
+ if (hdev->sysfs_data.store_as_hextext) {
+ snprintf(buf + index, 3, "%02x", power_state);
+ index += 2;
+ } else {
+ *(buf + index++) = power_state;
+ }
+
+ if (hdev->sysfs_data.store_as_hextext)
+ index++;
+
+ return index;
+}
+
+static ssize_t store_evwakeup(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hdmi_device *hdev = dev_to_hdev(dev);
+
+ if (!hdev)
+ return -EFAULT;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ event_wakeup(hdev);
+
+ return count;
+}
+
+static int hdmi_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int hdmi_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/* ioctl */
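+/* All ioctls operate on the default HDMI device (HDMI_DEVNR_DEFAULT). */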
+static long hdmi_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ u8 value = 0;
+ struct hdmi_register reg;
+ struct av8100_status status;
+ u8 aes_status;
+ struct hdmi_device *hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT);
+
+ if (!hdev)
+ return -EFAULT;
+
+ switch (cmd) {
+ case IOC_PLUG_DETECT_ENABLE:
+ {
+ struct plug_detect plug_detect;
+
+ if (copy_from_user(&plug_detect, (void *)arg,
+ sizeof(struct plug_detect)))
+ return -EINVAL;
+
+ if (plugdeten(hdev, &plug_detect))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_EDID_READ:
+ {
+ struct edid_read edid_read;
+
+ if (copy_from_user(&edid_read, (void *)arg,
+ sizeof(struct edid_read)))
+ return -EINVAL;
+
+ if (edidread(hdev, &edid_read, &edid_read.data_length,
+ edid_read.data))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&edid_read,
+ sizeof(struct edid_read))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_CEC_EVENT_ENABLE:
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ event_enable(hdev, value != 0,
+ HDMI_EVENT_CEC | HDMI_EVENT_CECTXERR |
+ HDMI_EVENT_CECTX);
+ break;
+
+ case IOC_CEC_READ:
+ {
+ struct cec_rw cec_read;
+
+ if (cecread(hdev, &cec_read.src, &cec_read.dest,
+ &cec_read.length, cec_read.data))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&cec_read,
+ sizeof(struct cec_rw))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_CEC_SEND:
+ {
+ struct cec_rw cec_send;
+
+ if (copy_from_user(&cec_send, (void *)arg,
+ sizeof(struct cec_rw)))
+ return -EINVAL;
+
+ if (cecsend(hdev, cec_send.src, cec_send.dest, cec_send.length,
+ cec_send.data))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_INFOFRAME_SEND:
+ {
+ struct info_fr info_fr;
+
+ if (copy_from_user(&info_fr, (void *)arg,
+ sizeof(struct info_fr)))
+ return -EINVAL;
+
+ if (infofrsend(hdev, info_fr.type, info_fr.ver, info_fr.crc,
+ info_fr.length, info_fr.data))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDCP_EVENT_ENABLE:
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ event_enable(hdev, value != 0, HDMI_EVENT_HDCP);
+ break;
+
+ case IOC_HDCP_CHKAESOTP:
+ if (hdcpchkaesotp(hdev, &value, &aes_status))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&aes_status,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDCP_FUSEAES:
+ {
+ struct hdcp_fuseaes hdcp_fuseaes;
+
+ if (copy_from_user(&hdcp_fuseaes, (void *)arg,
+ sizeof(struct hdcp_fuseaes)))
+ return -EINVAL;
+
+ if (hdcpfuseaes(hdev, hdcp_fuseaes.key, hdcp_fuseaes.crc,
+ &hdcp_fuseaes.result))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&hdcp_fuseaes,
+ sizeof(struct hdcp_fuseaes))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_HDCP_LOADAES:
+ {
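+ /*
+ * Key material layout: HDMI_HDCP_AES_NR_OF_BLOCKS AES key
+ * blocks, then the KSV (zero padded), then a CRC32 which is
+ * checked against the CRC32 returned by the chip.
+ */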
+ int block_cnt;
+ struct hdcp_loadaesone hdcp_loadaesone;
+ struct hdcp_loadaesall hdcp_loadaesall;
+
+ if (copy_from_user(&hdcp_loadaesall, (void *)arg,
+ sizeof(struct hdcp_loadaesall)))
+ return -EINVAL;
+
+ if (hdcpchkaesotp(hdev, &value, &aes_status))
+ return -EINVAL;
+
+ if (!aes_status) {
+ /* AES is not fused */
+ hdcp_loadaesone.result = HDMI_AES_NOT_FUSED;
+ goto ioc_hdcploadaes_err;
+ }
+
+ /* AES */
+ block_cnt = 0;
+ while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) {
+ memcpy(hdcp_loadaesone.key, hdcp_loadaesall.key +
+ block_cnt * HDMI_HDCP_AES_KEYSIZE,
+ HDMI_HDCP_AES_KEYSIZE);
+
+ if (hdcploadaes(hdev,
+ block_cnt + HDMI_HDCP_AES_BLOCK_START,
+ HDMI_HDCP_AES_KEYSIZE,
+ hdcp_loadaesone.key,
+ &hdcp_loadaesone.result,
+ hdcp_loadaesone.crc32))
+ return -EINVAL;
+
+ if (hdcp_loadaesone.result)
+ return -EINVAL;
+
+ block_cnt++;
+ }
+
+ /* KSV */
+ memset(hdcp_loadaesone.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE);
+ memcpy(hdcp_loadaesone.key + HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaesall.ksv, HDMI_HDCP_AES_KSVSIZE);
+
+ if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK,
+ HDMI_HDCP_AES_KSVSIZE +
+ HDMI_HDCP_AES_KSVZEROESSIZE,
+ hdcp_loadaesone.key,
+ &hdcp_loadaesone.result,
+ NULL))
+ return -EINVAL;
+
+ if (hdcp_loadaesone.result)
+ return -EINVAL;
+
+ /* CRC32 */
+ if (memcmp(hdcp_loadaesall.crc32, hdcp_loadaesone.crc32,
+ CRC32_SIZE)) {
+ dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n",
+ hdcp_loadaesall.crc32[0],
+ hdcp_loadaesall.crc32[1],
+ hdcp_loadaesall.crc32[2],
+ hdcp_loadaesall.crc32[3]);
+ hdcp_loadaesone.result = HDMI_RESULT_CRC_MISMATCH;
+ goto ioc_hdcploadaes_err;
+ }
+
+ioc_hdcploadaes_err:
+ hdcp_loadaesall.result = hdcp_loadaesone.result;
+
+ if (copy_to_user((void *)arg, (void *)&hdcp_loadaesall,
+ sizeof(struct hdcp_loadaesall))) {
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_HDCP_AUTHENCR_REQ:
+ {
+ struct hdcp_authencr hdcp_authencr;
+ int result = HDMI_RESULT_NOT_OK;
+
+ u8 buf[AUTH_BUF_LEN];
+
+ if (copy_from_user(&hdcp_authencr, (void *)arg,
+ sizeof(struct hdcp_authencr)))
+ return -EINVAL;
+
+ /* Default not OK */
+ hdcp_authencr.resp_size = 0;
+
+ if (hdcpchkaesotp(hdev, &value, &aes_status)) {
+ result = HDMI_AES_NOT_FUSED;
+ goto hdcp_authencr_end;
+ }
+
+ if (!aes_status) {
+ /* AES is not fused */
+ result = HDMI_AES_NOT_FUSED;
+ goto hdcp_authencr_end;
+ }
+
+ if (hdcpauthencr(hdev, hdcp_authencr.auth_type,
+ hdcp_authencr.encr_type,
+ &value,
+ buf)) {
+ result = HDMI_RESULT_NOT_OK;
+ goto hdcp_authencr_end;
+ }
+
+ if (value > AUTH_BUF_LEN)
+ value = AUTH_BUF_LEN;
+
+ result = HDMI_RESULT_OK;
+ hdcp_authencr.resp_size = value;
+ memcpy(hdcp_authencr.resp, buf, value);
+
+hdcp_authencr_end:
+ hdcp_authencr.result = result;
+ if (copy_to_user((void *)arg, (void *)&hdcp_authencr,
+ sizeof(struct hdcp_authencr)))
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDCP_STATE_GET:
+ if (av8100_reg_gen_status_r(NULL, NULL, NULL, NULL, NULL,
+ &value))
+ return -EINVAL;
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_EVENTS_READ:
+ value = events_read(hdev);
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+
+ /* Events are read: clear events */
+ events_clear(hdev, EVENTS_MASK);
+ break;
+
+ case IOC_EVENTS_CLEAR:
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ events_clear(hdev, value);
+ break;
+
+ case IOC_AUDIO_CFG:
+ {
+ struct audio_cfg audio_cfg;
+
+ if (copy_from_user(&audio_cfg, (void *)arg,
+ sizeof(struct audio_cfg)))
+ return -EINVAL;
+
+ audiocfg(hdev, &audio_cfg);
+ }
+ break;
+
+ case IOC_PLUG_STATUS:
+ status = av8100_status_get();
+ value = status.av8100_plugin_status == AV8100_HDMI_PLUGIN;
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_POWERONOFF:
+ /* Get desired power state on or off */
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EINVAL;
+
+ if (value == 0) {
+ if (av8100_powerdown() != 0) {
+ dev_err(hdev->dev, "av8100_powerdown FAIL\n");
+ return -EINVAL;
+ }
+ } else {
+ if (av8100_powerup() != 0) {
+ dev_err(hdev->dev, "av8100_powerup FAIL\n");
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_EVENT_WAKEUP:
+ /* Trigger event */
+ event_wakeup(hdev);
+ break;
+
+ case IOC_POWERSTATE:
+ status = av8100_status_get();
+ value = status.av8100_state >= AV8100_OPMODE_SCAN;
+
+ if (copy_to_user((void *)arg, (void *)&value,
+ sizeof(u8))) {
+ return -EINVAL;
+ }
+ break;
+
+ /* Internal */
+ case IOC_HDMI_ENABLE_INTERRUPTS:
+ av8100_disable_interrupt();
+ if (av8100_enable_interrupt() != 0) {
+ dev_err(hdev->dev, "av8100_ei FAIL\n");
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_DOWNLOAD_FW:
+ if (av8100_download_firmware(I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100 dl fw FAIL\n");
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_ONOFF:
+ {
+ union av8100_configuration config;
+
+ /* Get desired HDMI mode on or off */
+ if (copy_from_user(&value, (void *)arg, sizeof(u8)))
+ return -EFAULT;
+
+ if (av8100_conf_get(AV8100_COMMAND_HDMI, &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_get FAIL\n");
+ return -EINVAL;
+ }
+ if (value == 0)
+ config.hdmi_format.hdmi_mode = AV8100_HDMI_OFF;
+ else
+ config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+
+ if (av8100_conf_prep(AV8100_COMMAND_HDMI, &config) != 0) {
+ dev_err(hdev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+ if (av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL,
+ I2C_INTERFACE) != 0) {
+ dev_err(hdev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case IOC_HDMI_REGISTER_WRITE:
+ if (copy_from_user(&reg, (void *)arg,
+ sizeof(struct hdmi_register))) {
+ return -EINVAL;
+ }
+
+ if (av8100_reg_w(reg.offset, reg.value) != 0) {
+ dev_err(hdev->dev, "hdmi_register_write FAIL\n");
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_REGISTER_READ:
+ if (copy_from_user(&reg, (void *)arg,
+ sizeof(struct hdmi_register))) {
+ return -EINVAL;
+ }
+
+ if (av8100_reg_r(reg.offset, &reg.value) != 0) {
+ dev_err(hdev->dev, "hdmi_register_write FAIL\n");
+ return -EINVAL;
+ }
+
+ if (copy_to_user((void *)arg, (void *)&reg,
+ sizeof(struct hdmi_register))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_STATUS_GET:
+ status = av8100_status_get();
+
+ if (copy_to_user((void *)arg, (void *)&status,
+ sizeof(struct av8100_status))) {
+ return -EINVAL;
+ }
+ break;
+
+ case IOC_HDMI_CONFIGURATION_WRITE:
+ {
+ struct hdmi_command_register command_reg;
+
+ if (copy_from_user(&command_reg, (void *)arg,
+ sizeof(struct hdmi_command_register)) != 0) {
+ dev_err(hdev->dev, "IOC_HDMI_CONFIGURATION_WRITE "
+ "fail 1\n");
+ command_reg.return_status = EINVAL;
+ } else {
+ command_reg.return_status = 0;
+ if (av8100_conf_w_raw(command_reg.cmd_id,
+ command_reg.buf_len,
+ command_reg.buf,
+ &(command_reg.buf_len),
+ command_reg.buf) != 0) {
+ dev_err(hdev->dev,
+ "IOC_HDMI_CONFIGURATION_WRITE "
+ "fail 2\n");
+ command_reg.return_status = EINVAL;
+ }
+ }
+
+ if (copy_to_user((void *)arg, (void *)&command_reg,
+ sizeof(struct hdmi_command_register)) != 0) {
+ return -EFAULT;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
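+/**
+ * hdmi_poll - Poll support for the hdmi device
+ *
+ * @filp: File to poll on
+ * @wait: Poll table to wait on
+ *
+ * Reports POLLIN | POLLRDNORM once hdmi_event() has flagged a new,
+ * unmasked event; the flag is consumed, so the next poll blocks
+ * until another event arrives.
+ */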
+static unsigned int
+hdmi_poll(struct file *filp, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct hdmi_device *hdev;
+
+ hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT);
+ if (!hdev)
+ return 0;
+
+ dev_dbg(hdev->dev, "%s\n", __func__);
+
+ poll_wait(filp, &hdev->event_wq, wait);
+
+ LOCK_HDMI_EVENTS;
+ if (hdev->events_received == true) {
+ hdev->events_received = false;
+ mask = POLLIN | POLLRDNORM;
+ }
+ UNLOCK_HDMI_EVENTS;
+
+ return mask;
+}
+
+static const struct file_operations hdmi_fops = {
+ .owner = THIS_MODULE,
+ .open = hdmi_open,
+ .release = hdmi_release,
+ .unlocked_ioctl = hdmi_ioctl,
+ .poll = hdmi_poll
+};
+
+/* Event callback function called by hw driver */
+void hdmi_event(enum av8100_hdmi_event ev)
+{
+ int events_old;
+ int events_new;
+ struct hdmi_device *hdev;
+ struct kobject *kobj;
+
+ hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT);
+ if (!hdev)
+ return;
+
+ dev_dbg(hdev->dev, "hdmi_event %02x\n", ev);
+
+ kobj = &(hdev->dev->kobj);
+
+ LOCK_HDMI_EVENTS;
+
+ events_old = hdev->events;
+
+ /* Set event */
+ switch (ev) {
+ case AV8100_HDMI_EVENT_HDMI_PLUGIN:
+ hdev->events &= ~HDMI_EVENT_HDMI_PLUGOUT;
+ hdev->events |= HDMI_EVENT_HDMI_PLUGIN;
+ break;
+
+ case AV8100_HDMI_EVENT_HDMI_PLUGOUT:
+ hdev->events &= ~HDMI_EVENT_HDMI_PLUGIN;
+ hdev->events |= HDMI_EVENT_HDMI_PLUGOUT;
+ cec_tx_status(hdev, CEC_TX_SET_FREE);
+ break;
+
+ case AV8100_HDMI_EVENT_CEC:
+ hdev->events |= HDMI_EVENT_CEC;
+ break;
+
+ case AV8100_HDMI_EVENT_HDCP:
+ hdev->events |= HDMI_EVENT_HDCP;
+ break;
+
+ case AV8100_HDMI_EVENT_CECTXERR:
+ hdev->events |= HDMI_EVENT_CECTXERR;
+ cec_tx_status(hdev, CEC_TX_SET_FREE);
+ break;
+
+ case AV8100_HDMI_EVENT_CECTX:
+ hdev->events |= HDMI_EVENT_CECTX;
+ cec_tx_status(hdev, CEC_TX_SET_FREE);
+ break;
+
+ default:
+ break;
+ }
+
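+ /* Report only the events enabled in the event mask */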
+ events_new = hdev->events_mask & hdev->events;
+
+ UNLOCK_HDMI_EVENTS;
+
+ dev_dbg(hdev->dev, "hdmi events:%02x, events_old:%02x mask:%02x\n",
+ events_new, events_old, hdev->events_mask);
+
+ if (events_new != events_old) {
+ /* Wake up application waiting for event via call to poll() */
+ sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME);
+
+ LOCK_HDMI_EVENTS;
+ hdev->events_received = true;
+ UNLOCK_HDMI_EVENTS;
+
+ wake_up_interruptible(&hdev->event_wq);
+ }
+}
+EXPORT_SYMBOL(hdmi_event);
+
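+/**
+ * hdmi_device_register - Registers the hdmi misc device
+ *
+ * @hdev: The hdmi device data
+ *
+ * Registers /dev/hdmi with a dynamic minor number and stores the
+ * resulting struct device for sysfs and logging use.
+ */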
+int hdmi_device_register(struct hdmi_device *hdev)
+{
+ int ret;
+
+ hdev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ hdev->miscdev.name = "hdmi";
+ hdev->miscdev.fops = &hdmi_fops;
+
+ ret = misc_register(&hdev->miscdev);
+ if (ret) {
+ pr_err("hdmi misc_register failed (%d)\n", ret);
+ return ret;
+ }
+
+ hdev->dev = hdev->miscdev.this_device;
+
+ return 0;
+}
+
+int __init hdmi_init(void)
+{
+ struct hdmi_device *hdev;
+ int i;
+ int ret;
+
+ /* Allocate device data */
+ hdev = kzalloc(sizeof(struct hdmi_device), GFP_KERNEL);
+ if (!hdev) {
+ pr_err("%s: Alloc failure\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Add to list */
+ list_add_tail(&hdev->list, &hdmi_device_list);
+
+ ret = hdmi_device_register(hdev);
+ if (ret) {
+ pr_err("%s: hdmi_device_register failed\n", __func__);
+ list_del(&hdev->list);
+ kfree(hdev);
+ return ret;
+ }
+
+ hdev->devnr = HDMI_DEVNR_DEFAULT;
+
+ /* Default sysfs file format is hextext */
+ hdev->sysfs_data.store_as_hextext = true;
+
+ init_waitqueue_head(&hdev->event_wq);
+
+ /* Create sysfs attrs */
+ for (i = 0; attr_name(hdmi_sysfs_attrs[i]); i++) {
+ ret = device_create_file(hdev->dev, &hdmi_sysfs_attrs[i]);
+ if (ret)
+ dev_err(hdev->dev,
+ "Unable to create sysfs attr %s (%d)\n",
+ hdmi_sysfs_attrs[i].attr.name, ret);
+ }
+
+ /* Register event callback */
+ av8100_hdmi_event_cb_set(hdmi_event);
+
+ return 0;
+}
+late_initcall(hdmi_init);
+
+void hdmi_exit(void)
+{
+ struct hdmi_device *hdev = NULL;
+ int i;
+
+ if (list_empty(&hdmi_device_list))
+ return;
+
+ hdev = list_entry(hdmi_device_list.next,
+ struct hdmi_device, list);
+
+ /* Deregister event callback */
+ av8100_hdmi_event_cb_set(NULL);
+
+ /* Remove sysfs attrs */
+ for (i = 0; attr_name(hdmi_sysfs_attrs[i]); i++)
+ device_remove_file(hdev->dev, &hdmi_sysfs_attrs[i]);
+
+ misc_deregister(&hdev->miscdev);
+
+ /* Remove from list */
+ list_del(&hdev->list);
+
+ /* Free device data */
+ kfree(hdev);
+}
diff --git a/drivers/video/av8100/hdmi_loc.h b/drivers/video/av8100/hdmi_loc.h
new file mode 100644
index 00000000000..20314910db9
--- /dev/null
+++ b/drivers/video/av8100/hdmi_loc.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#ifndef __HDMI_LOC__H__
+#define __HDMI_LOC__H__
+
+#define EDID_BUF_LEN 128
+#define COMMAND_BUF_LEN 128
+#define AES_KEY_SIZE 16
+#define CRC32_SIZE 4
+#define AUTH_BUF_LEN 126
+#define CECTX_TRY 20
+#define CECTX_WAITTIME 25
+
+struct edid_data {
+ u8 buf_len;
+ u8 buf[EDID_BUF_LEN];
+};
+
+struct authencr {
+ int result;
+ u8 buf_len;
+ u8 buf[AUTH_BUF_LEN];
+};
+
+struct hdmi_register {
+ unsigned char value;
+ unsigned char offset;
+};
+
+struct hdcp_loadaesone {
+ u8 key[AES_KEY_SIZE];
+ u8 result;
+ u8 crc32[CRC32_SIZE];
+};
+
+struct hdmi_sysfs_data {
+ bool store_as_hextext;
+ struct plug_detect plug_detect;
+ bool enable_cec_event;
+ struct edid_data edid_data;
+ struct cec_rw cec_read;
+ bool fuse_result;
+ int loadaes_result;
+ struct authencr authencr;
+};
+
+struct hdmi_command_register {
+ unsigned char cmd_id; /* input */
+ unsigned char buf_len; /* input, output */
+ unsigned char buf[COMMAND_BUF_LEN]; /* input, output */
+ unsigned char return_status; /* output */
+};
+
+enum cec_tx_status_action {
+ CEC_TX_SET_FREE,
+ CEC_TX_SET_BUSY,
+ CEC_TX_CHECK
+};
+
+/* Internal */
+#define IOC_HDMI_ENABLE_INTERRUPTS _IOWR(HDMI_IOC_MAGIC, 32, int)
+#define IOC_HDMI_DOWNLOAD_FW _IOWR(HDMI_IOC_MAGIC, 33, int)
+#define IOC_HDMI_ONOFF _IOWR(HDMI_IOC_MAGIC, 34, int)
+#define IOC_HDMI_REGISTER_WRITE _IOWR(HDMI_IOC_MAGIC, 35, int)
+#define IOC_HDMI_REGISTER_READ _IOWR(HDMI_IOC_MAGIC, 36, int)
+#define IOC_HDMI_STATUS_GET _IOWR(HDMI_IOC_MAGIC, 37, int)
+#define IOC_HDMI_CONFIGURATION_WRITE _IOWR(HDMI_IOC_MAGIC, 38, int)
+
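+/*
+ * Illustrative user-space sketch (error handling omitted; assumes the
+ * public definitions from the hdmi header) toggling HDMI output via
+ * the on/off ioctl:
+ *
+ *	int fd = open("/dev/hdmi", O_RDWR);
+ *	__u8 on = 1;
+ *
+ *	ioctl(fd, IOC_HDMI_ONOFF, &on);
+ *	close(fd);
+ */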
+#endif /* __HDMI_LOC__H__ */
diff --git a/drivers/video/b2r2/Kconfig b/drivers/video/b2r2/Kconfig
new file mode 100644
index 00000000000..8cc81876de7
--- /dev/null
+++ b/drivers/video/b2r2/Kconfig
@@ -0,0 +1,134 @@
+config FB_B2R2
+ tristate "B2R2 engine support"
+ default n
+ help
+ The B2R2 engine performs bit-blitting, post-processing and
+ composition operations.
+
+config B2R2_PLUG_CONF
+ bool "B2R2 bus plug configuration"
+ depends on FB_B2R2
+ default n
+ help
+ Configures how B2R2 accesses the memory bus. Enabling this will increase
+ the performance of B2R2 at the cost of using the bus more heavily.
+
+ If this is set to 'n', the hardware defaults will be used.
+
+choice
+ prompt "Opcode size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_OPSIZE_64
+
+ config B2R2_OPSIZE_8
+ bool "8 bytes"
+ config B2R2_OPSIZE_16
+ bool "16 bytes"
+ config B2R2_OPSIZE_32
+ bool "32 bytes"
+ config B2R2_OPSIZE_64
+ bool "64 bytes"
+
+endchoice
+
+choice
+ prompt "Chunk size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_CHSIZE_128
+
+ config B2R2_CHSIZE_1
+ bool "1 op"
+ config B2R2_CHSIZE_2
+ bool "2 ops"
+ config B2R2_CHSIZE_4
+ bool "4 ops"
+ config B2R2_CHSIZE_8
+ bool "8 ops"
+ config B2R2_CHSIZE_16
+ bool "16 ops"
+ config B2R2_CHSIZE_32
+ bool "32 ops"
+ config B2R2_CHSIZE_64
+ bool "64 ops"
+ config B2R2_CHSIZE_128
+ bool "128 ops"
+endchoice
+
+choice
+ prompt "Message size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_MGSIZE_128
+
+ config B2R2_MGSIZE_1
+ bool "1 chunk"
+ config B2R2_MGSIZE_2
+ bool "2 chunks"
+ config B2R2_MGSIZE_4
+ bool "4 chunks"
+ config B2R2_MGSIZE_8
+ bool "8 s"
+ config B2R2_MGSIZE_16
+ bool "16 chunks"
+ config B2R2_MGSIZE_32
+ bool "32 chunks"
+ config B2R2_MGSIZE_64
+ bool "64 chunks"
+ config B2R2_MGSIZE_128
+ bool "128 chunks"
+endchoice
+
+choice
+ prompt "Page size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_PGSIZE_256
+
+ config B2R2_PGSIZE_64
+ bool "64 bytes"
+ config B2R2_PGSIZE_128
+ bool "128 bytes"
+ config B2R2_PGSIZE_256
+ bool "256 bytes"
+endchoice
+
+config B2R2_DEBUG
+ bool "B2R2 debugging"
+ default n
+ depends on FB_B2R2
+ help
+ Enable debugging features for the B2R2 driver.
+
+config B2R2_PROFILER
+ tristate "B2R2 profiler"
+ default n
+ depends on FB_B2R2
+ help
+ Enables the profiler for the B2R2 driver.
+
+ It is recommended to build this as a module, since the configuration
+ of filters etc. is done at load time.
+
+config B2R2_GENERIC
+ bool "B2R2 generic path"
+ default y
+ depends on FB_B2R2
+ help
+ Enables support for the generic path in the B2R2 driver. This path should
+ be used when there is no optimized implementation for a request.
+
+choice
+ prompt "Generic usage mode"
+ depends on B2R2_GENERIC
+ default B2R2_GENERIC_FALLBACK
+
+ config B2R2_GENERIC_FALLBACK
+ bool "Fallback"
+ help
+ The optimized path will be used for all supported operations, and the
+ generic path will be used as a fallback for the ones not implemented.
+
+ config B2R2_GENERIC_ONLY
+ bool "Always"
+ help
+ The generic path will be used for all operations.
+
+endchoice
diff --git a/drivers/video/b2r2/Makefile b/drivers/video/b2r2/Makefile
new file mode 100644
index 00000000000..f271f4e7ea1
--- /dev/null
+++ b/drivers/video/b2r2/Makefile
@@ -0,0 +1,15 @@
+# Makefile for the B2R2 loadable module
+
+obj-$(CONFIG_FB_B2R2) += b2r2.o
+
+b2r2-objs = b2r2_api.o b2r2_blt_main.o b2r2_core.o b2r2_mem_alloc.o \
+ b2r2_generic.o b2r2_node_gen.o b2r2_node_split.o \
+ b2r2_profiler_socket.o b2r2_timing.o b2r2_filters.o \
+ b2r2_utils.o b2r2_input_validation.o b2r2_hw_convert.o
+
+ifdef CONFIG_B2R2_DEBUG
+b2r2-objs += b2r2_debug.o
+endif
+
+ifeq ($(CONFIG_FB_B2R2),m)
+obj-y += b2r2_kernel_if.o
+endif
+
+obj-$(CONFIG_B2R2_PROFILER) += b2r2_profiler/
diff --git a/drivers/video/b2r2/b2r2_api.c b/drivers/video/b2r2/b2r2_api.c
new file mode 100644
index 00000000000..0361e85ebf3
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_api.c
@@ -0,0 +1,1643 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010/2012
+ *
+ * ST-Ericsson B2R2 Blitter module API
+ *
+ * Author: Jorgen Nilsson <jorgen.nilsson@stericsson.com>
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
+#include <linux/fb.h>
+#include <linux/uaccess.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+#include <asm/cacheflush.h>
+#include <linux/smp.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/hwmem.h>
+#include <linux/kref.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_control.h"
+#include "b2r2_core.h"
+#include "b2r2_timing.h"
+#include "b2r2_utils.h"
+#include "b2r2_debug.h"
+#include "b2r2_input_validation.h"
+#include "b2r2_profiler_socket.h"
+#include "b2r2_hw.h"
+
+/*
+ * TODO:
+ * Implementation of query cap
+ * Support for user space virtual pointer to physically consecutive memory
+ * Support for user space virtual pointer to physically scattered memory
+ * Callback reads lagging behind in blt_api_stress app
+ * Store smaller items in the report list instead of the whole request
+ * Support read of many report records at once.
+ */
+
+#define DATAS_START_SIZE 10
+#define DATAS_GROW_SIZE 5
+
+/**
+ * struct b2r2_blt - The blitter device data, /dev/b2r2_blt
+ *
+ * @miscdev: The miscdev presenting b2r2 to the system
+ */
+struct b2r2_blt {
+ spinlock_t lock;
+ int next_job_id;
+ struct miscdevice miscdev;
+ struct device *dev;
+ struct mutex datas_lock;
+ /**
+ * datas - Stores the b2r2_blt_data mapped to the client handle
+ */
+ struct b2r2_blt_data **datas;
+ /**
+ * data_count - The current maximum of active datas
+ */
+ int data_count;
+};
+
+struct b2r2_blt_data {
+ struct b2r2_control_instance *ctl_instance[B2R2_MAX_NBR_DEVICES];
+};
+
+/**
+ * Used to keep track of coming and going b2r2 cores and
+ * the number of active instance references
+ */
+struct b2r2_control_ref {
+ struct b2r2_control *b2r2_control;
+ spinlock_t lock;
+};
+
+/**
+ * blt_refcount - Reference count keeping the blitter device data alive
+ */
+static struct kref blt_refcount;
+
+/**
+ * b2r2_blt - The blitter device, /dev/b2r2_blt
+ */
+static struct b2r2_blt *b2r2_blt;
+
+/**
+ * b2r2_control - The core controls and synchronization mechanism
+ */
+static struct b2r2_control_ref b2r2_controls[B2R2_MAX_NBR_DEVICES];
+
+/**
+ * b2r2_blt_add_control - Add the b2r2 core control
+ */
+void b2r2_blt_add_control(struct b2r2_control *cont)
+{
+ unsigned long flags;
+ BUG_ON(cont->id < 0 || cont->id >= B2R2_MAX_NBR_DEVICES);
+
+ spin_lock_irqsave(&b2r2_controls[cont->id].lock, flags);
+ if (b2r2_controls[cont->id].b2r2_control == NULL)
+ b2r2_controls[cont->id].b2r2_control = cont;
+ spin_unlock_irqrestore(&b2r2_controls[cont->id].lock, flags);
+}
+
+/**
+ * b2r2_blt_remove_control - Remove the b2r2 core control
+ */
+void b2r2_blt_remove_control(struct b2r2_control *cont)
+{
+ unsigned long flags;
+ BUG_ON(cont->id < 0 || cont->id >= B2R2_MAX_NBR_DEVICES);
+
+ spin_lock_irqsave(&b2r2_controls[cont->id].lock, flags);
+ b2r2_controls[cont->id].b2r2_control = NULL;
+ spin_unlock_irqrestore(&b2r2_controls[cont->id].lock, flags);
+}
+
+/**
+ * b2r2_blt_get_control - Lock control for writing/removal
+ */
+static struct b2r2_control *b2r2_blt_get_control(int i)
+{
+ struct b2r2_control *cont;
+ unsigned long flags;
+ BUG_ON(i < 0 || i >= B2R2_MAX_NBR_DEVICES);
+
+ spin_lock_irqsave(&b2r2_controls[i].lock, flags);
+ cont = (struct b2r2_control *) b2r2_controls[i].b2r2_control;
+ if (cont != NULL) {
+ if (!cont->enabled)
+ cont = NULL;
+ else
+ kref_get(&cont->ref);
+ }
+ spin_unlock_irqrestore(&b2r2_controls[i].lock, flags);
+
+ return cont;
+}
+
+/**
+ * b2r2_blt_release_control - Unlock control for writing/removal
+ */
+static void b2r2_blt_release_control(int i)
+{
+ struct b2r2_control *cont;
+ unsigned long flags;
+ BUG_ON(i < 0 || i >= B2R2_MAX_NBR_DEVICES);
+
+ spin_lock_irqsave(&b2r2_controls[i].lock, flags);
+ cont = (struct b2r2_control *) b2r2_controls[i].b2r2_control;
+ spin_unlock_irqrestore(&b2r2_controls[i].lock, flags);
+ if (cont != NULL)
+ kref_put(&cont->ref, b2r2_core_release);
+}
+
+/**
+ * Increase size of array containing b2r2 handles
+ */
+static int grow_datas(void)
+{
+ struct b2r2_blt_data **new_datas = NULL;
+ int new_data_count = b2r2_blt->data_count + DATAS_GROW_SIZE;
+ int ret = 0;
+
+ new_datas = kzalloc(new_data_count * sizeof(*new_datas), GFP_KERNEL);
+ if (new_datas == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(new_datas, b2r2_blt->datas,
+ b2r2_blt->data_count * sizeof(*b2r2_blt->datas));
+
+ kfree(b2r2_blt->datas);
+
+ b2r2_blt->data_count = new_data_count;
+ b2r2_blt->datas = new_datas;
+exit:
+ return ret;
+}
+
+/**
+ * Allocate and/or reserve a b2r2 handle
+ */
+static int alloc_handle(struct b2r2_blt_data *blt_data)
+{
+ int handle;
+ int ret;
+
+ mutex_lock(&b2r2_blt->datas_lock);
+
+ if (b2r2_blt->datas == NULL) {
+ b2r2_blt->datas = kzalloc(
+ DATAS_START_SIZE * sizeof(*b2r2_blt->datas),
+ GFP_KERNEL);
+ if (b2r2_blt->datas == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ b2r2_blt->data_count = DATAS_START_SIZE;
+ }
+
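+ /* Find a free slot, growing the array if all slots are taken */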
+ for (handle = 0; handle < b2r2_blt->data_count; handle++) {
+ if (b2r2_blt->datas[handle] == NULL) {
+ b2r2_blt->datas[handle] = blt_data;
+ break;
+ }
+
+ if (handle == b2r2_blt->data_count - 1) {
+ ret = grow_datas();
+ if (ret < 0)
+ goto exit;
+ }
+ }
+ ret = handle;
+exit:
+ mutex_unlock(&b2r2_blt->datas_lock);
+
+ return ret;
+}
+
+/**
+ * Get b2r2 data from b2r2 handle
+ */
+static struct b2r2_blt_data *get_data(int handle)
+{
+ if (handle >= b2r2_blt->data_count || handle < 0)
+ return NULL;
+ else
+ return b2r2_blt->datas[handle];
+}
+
+/**
+ * Unreserve b2r2 handle
+ */
+static void free_handle(int handle)
+{
+ if (handle < b2r2_blt->data_count && handle >= 0)
+ b2r2_blt->datas[handle] = NULL;
+}
+
+/**
+ * Get the next job number. This is the one returned to the client
+ * if the blit request was successful.
+ */
+static int get_next_job_id(void)
+{
+ int job_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&b2r2_blt->lock, flags);
+ if (b2r2_blt->next_job_id < 1)
+ b2r2_blt->next_job_id = 1;
+ job_id = b2r2_blt->next_job_id++;
+ spin_unlock_irqrestore(&b2r2_blt->lock, flags);
+
+ return job_id;
+}
+
+/**
+ * Limit the number of cores used in some "easy" and impossible cases
+ */
+static int limit_blits(int n_split, struct b2r2_blt_req *user_req)
+{
+ if (n_split <= 1)
+ return n_split;
+
+ if (user_req->dst_rect.width < 24 && user_req->dst_rect.height < 24)
+ return 1;
+
+ if (user_req->src_rect.width < n_split &&
+ user_req->src_rect.height < n_split)
+ return 1;
+
+ return n_split;
+}
+
+/**
+ * Check if the format inherently requires the b2r2 scaling engine to be active
+ */
+static bool is_scaling_fmt(enum b2r2_blt_fmt fmt)
+{
+ /* Plane separated formats must be treated as scaling */
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * Check for macroblock formats
+ */
+static bool is_mb_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * Split a request rectangle on available cores
+ */
+static int b2r2_blt_split_request(struct b2r2_blt_data *blt_data,
+ struct b2r2_blt_req *user_req,
+ struct b2r2_blt_request **split_requests,
+ struct b2r2_control_instance **ctl,
+ int *n_split)
+{
+ int sstep_x, sstep_y, dstep_x, dstep_y;
+ int dstart_x, dstart_y;
+ int bstart_x, bstart_y;
+ int dpos_x, dpos_y;
+ int bpos_x, bpos_y;
+ int dso_x = 1;
+ int dso_y = 1;
+ int sf_x, sf_y;
+ int i;
+ int srw, srh;
+ int drw, drh;
+ bool ssplit_x = true;
+ bool dsplit_x = true;
+ enum b2r2_blt_transform transform;
+ bool is_rotation = false;
+ bool is_scaling = false;
+ bool bg_blend = false;
+ u32 core_mask = 0;
+
+ srw = user_req->src_rect.width;
+ srh = user_req->src_rect.height;
+ drw = user_req->dst_rect.width;
+ drh = user_req->dst_rect.height;
+ transform = user_req->transform;
+
+ /* Early exit in the basic cases */
+ if (*n_split == 0) {
+ return -ENOSYS;
+ } else if (*n_split == 1 ||
+ (srw < *n_split && srh < *n_split) ||
+ (drw < *n_split && drh < *n_split) ||
+ is_mb_fmt(user_req->src_img.fmt)) {
+ /* Handle macroblock formats with one
+ * core for now since there seems to be some bug
+ * related to macroblock access patterns
+ */
+ memcpy(&split_requests[0]->user_req,
+ user_req,
+ sizeof(*user_req));
+ split_requests[0]->core_mask = 1;
+ *n_split = 1;
+ return 0;
+ }
+
+ /*
+ * TODO: fix the load balancing algorithm
+ */
+
+ is_rotation = (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0;
+
+ /* Check for scaling */
+ if (is_rotation) {
+ is_scaling = (user_req->src_rect.width !=
+ user_req->dst_rect.height) ||
+ (user_req->src_rect.height !=
+ user_req->dst_rect.width);
+ } else {
+ is_scaling = (user_req->src_rect.width !=
+ user_req->dst_rect.width) ||
+ (user_req->src_rect.height !=
+ user_req->dst_rect.height);
+ }
+
+ is_scaling = is_scaling ||
+ is_scaling_fmt(user_req->src_img.fmt) ||
+ is_scaling_fmt(user_req->dst_img.fmt);
+
+ bg_blend = ((user_req->flags & B2R2_BLT_FLAG_BG_BLEND) != 0);
+
+ /*
+ * Split the request
+ */
+
+ b2r2_log_info(b2r2_blt->dev, "%s: In (t:0x%08X, f:0x%08X):\n"
+ "\tsrc_rect x:%d, y:%d, w:%d, h:%d src fmt:0x%x\n"
+ "\tdst_rect x:%d, y:%d, w:%d, h:%d dst fmt:0x%x\n",
+ __func__,
+ user_req->transform,
+ user_req->flags,
+ user_req->src_rect.x,
+ user_req->src_rect.y,
+ user_req->src_rect.width,
+ user_req->src_rect.height,
+ user_req->src_img.fmt,
+ user_req->dst_rect.x,
+ user_req->dst_rect.y,
+ user_req->dst_rect.width,
+ user_req->dst_rect.height,
+ user_req->dst_img.fmt);
+
+ /* TODO: We need sub pixel precision here,
+ * or a better way to split rects */
+ dstart_x = user_req->dst_rect.x;
+ dstart_y = user_req->dst_rect.y;
+ if (bg_blend) {
+ bstart_x = user_req->bg_rect.x;
+ bstart_y = user_req->bg_rect.y;
+ }
+
+ if (srw && srh) {
+ if ((srw < srh) && !is_scaling) {
+ ssplit_x = false;
+ sstep_y = srh / *n_split;
+ /* Round up */
+ if (srh % (*n_split))
+ sstep_y++;
+
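+ /* Aim for even 16px multiples */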
+ if (srh > 16)
+ sstep_y = ((sstep_y + 16) >> 4) << 4;
+
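+ /* Scale factors use 10 fractional bits (x << 10) */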
+ if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ sf_y = (drw << 10) / srh;
+ dstep_x = (sf_y * sstep_y) >> 10;
+ } else {
+ dsplit_x = false;
+ sf_y = (drh << 10) / srh;
+ dstep_y = (sf_y * sstep_y) >> 10;
+ }
+ } else {
+ sstep_x = srw / *n_split;
+ /* Round up */
+ if (srw % (*n_split))
+ sstep_x++;
+
+ if (is_scaling) {
+ int scale_step_size =
+ B2R2_RESCALE_MAX_WIDTH - 1;
+ int pad = (scale_step_size -
+ (sstep_x % scale_step_size));
+ if ((sstep_x + pad) < srw)
+ sstep_x += pad;
+ } else {
+ /* Aim for even 16px multiples */
+ if ((sstep_x & 0xF) && ((sstep_x + 16) < srw))
+ sstep_x = ((sstep_x + 16) >> 4) << 4;
+ }
+
+ if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ dsplit_x = false;
+ sf_x = (drh << 10) / srw;
+ dstep_y = (sf_x * sstep_x) >> 10;
+ } else {
+ sf_x = (drw << 10) / srw;
+ dstep_x = (sf_x * sstep_x) >> 10;
+ }
+ }
+
+ } else {
+ sstep_x = sstep_y = 0;
+
+ if (drw < drh) {
+ dsplit_x = false;
+ dstep_y = drh / *n_split;
+ /* Round up */
+ if (drh % *n_split)
+ dstep_y++;
+
+ /* Aim for even 16px multiples */
+ if ((dstep_y & 0xF) && ((dstep_y + 16) < drh))
+ dstep_y = ((dstep_y + 16) >> 4) << 4;
+ } else {
+ dstep_x = drw / *n_split;
+ /* Round up */
+ if (drw % *n_split)
+ dstep_x++;
+
+ /* Aim for even 16px multiples */
+ if ((dstep_x & 0xF) && ((dstep_x + 16) < drw))
+ dstep_x = ((dstep_x + 16) >> 4) << 4;
+ }
+ }
+
+ /* Check for flip and rotate to establish destination
+ * step order */
+ if (transform & B2R2_BLT_TRANSFORM_FLIP_H) {
+ dstart_x += drw;
+ if (bg_blend)
+ bstart_x += drw;
+ dso_x = -1;
+ }
+ if ((transform & B2R2_BLT_TRANSFORM_FLIP_V) ||
+ (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90)) {
+ dstart_y += drh;
+ if (bg_blend)
+ bstart_y += drh;
+ dso_y = -1;
+ }
+
+ /* Set scan starting position */
+ dpos_x = dstart_x;
+ dpos_y = dstart_y;
+ if (bg_blend) {
+ bpos_x = bstart_x;
+ bpos_y = bstart_y;
+ }
+
+ for (i = 0; i < *n_split; i++) {
+ struct b2r2_blt_req *sreq =
+ &split_requests[i]->user_req;
+
+ /* First mimic all */
+ memcpy(sreq, user_req, sizeof(*user_req));
+
+ /* Then change the rects */
+ if (srw && srh) {
+ if (ssplit_x) {
+ if (sstep_x > 0) {
+ sreq->src_rect.width =
+ min(sstep_x, srw);
+ sreq->src_rect.x += i*sstep_x;
+ srw -= sstep_x;
+ } else {
+ sreq->src_rect.width = srw;
+ }
+ } else {
+ if (sstep_y > 0) {
+ sreq->src_rect.y += i*sstep_y;
+ sreq->src_rect.height =
+ min(sstep_y, srh);
+ srh -= sstep_y;
+ } else {
+ sreq->src_rect.height = srh;
+ }
+ }
+ }
+
+ if (dsplit_x) {
+ int sx = min(dstep_x, drw);
+ if (dso_x < 0) {
+ dpos_x += dso_x * sx;
+ if (bg_blend)
+ bpos_x += dso_x * sx;
+ }
+ sreq->dst_rect.width = sx;
+ sreq->dst_rect.x = dpos_x;
+ if (bg_blend) {
+ sreq->bg_rect.width = sx;
+ sreq->bg_rect.x = bpos_x;
+ }
+ if (dso_x > 0) {
+ dpos_x += dso_x * sx;
+ if (bg_blend)
+ bpos_x += dso_x * sx;
+ }
+ drw -= sx;
+ } else {
+ int sy = min(dstep_y, drh);
+ if (dso_y < 0) {
+ dpos_y += dso_y * sy;
+ if (bg_blend)
+ bpos_y += dso_y * sy;
+ }
+ sreq->dst_rect.height = sy;
+ sreq->dst_rect.y = dpos_y;
+ if (bg_blend) {
+ sreq->bg_rect.height = sy;
+ sreq->bg_rect.y = bpos_y;
+ }
+ if (dso_y > 0) {
+ dpos_y += dso_y * sy;
+ if (bg_blend)
+ bpos_y += dso_y * sy;
+ }
+ drh -= sy;
+ }
+
+ b2r2_log_info(b2r2_blt->dev, "%s: Out:\n"
+ "\tsrc_rect x:%d, y:%d, w:%d, h:%d\n"
+ "\tdst_rect x:%d, y:%d, w:%d, h:%d\n"
+ "\tbg_rect x:%d, y:%d, w:%d, h:%d\n",
+ __func__,
+ sreq->src_rect.x,
+ sreq->src_rect.y,
+ sreq->src_rect.width,
+ sreq->src_rect.height,
+ sreq->dst_rect.x,
+ sreq->dst_rect.y,
+ sreq->dst_rect.width,
+ sreq->dst_rect.height,
+ sreq->bg_rect.x,
+ sreq->bg_rect.y,
+ sreq->bg_rect.width,
+ sreq->bg_rect.height);
+
+ core_mask |= (1 << i);
+ }
+
+ for (i = 0; i < *n_split; i++)
+ split_requests[i]->core_mask = core_mask;
+
+ return 0;
+}
+
+/**
+ * Get available b2r2 control instances. It will be limited
+ * to the number of cores available at the current point in time.
+ * It will also cause the cores to stay active during the time until
+ * release_control_instances is called.
+ */
+static void get_control_instances(struct b2r2_blt_data *blt_data,
+ struct b2r2_control_instance **ctl, int max_size,
+ int *count)
+{
+ int i;
+
+ *count = 0;
+ for (i = 0; i < max_size; i++) {
+ struct b2r2_control_instance *ci = blt_data->ctl_instance[i];
+ if (ci) {
+ struct b2r2_control *cont =
+ b2r2_blt_get_control(ci->control_id);
+ if (cont) {
+ ctl[*count] = ci;
+ *count += 1;
+ }
+ }
+ }
+}
+
+/**
+ * Release b2r2 control instances. The cores allocated for the request
+ * are given back.
+ */
+static void release_control_instances(struct b2r2_control_instance **ctl,
+ int count)
+{
+ int i;
+
+ /* Release the handles to the core controls */
+ for (i = 0; i < count; i++) {
+ if (ctl[i])
+ b2r2_blt_release_control(ctl[i]->control_id);
+ }
+}
+
+/**
+ * Free b2r2 request
+ */
+static void b2r2_free_request(struct b2r2_blt_request *request)
+{
+ if (request) {
+ /* Free requests in split_requests */
+ if (request->clut)
+ dma_free_coherent(b2r2_blt->dev,
+ CLUT_SIZE,
+ request->clut,
+ request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ kfree(request);
+ }
+}
+
+/**
+ * Allocate internal b2r2 request based on user input.
+ */
+static int b2r2_alloc_request(struct b2r2_blt_req *user_req,
+ bool us_req, struct b2r2_blt_request **request_out)
+{
+ int ret = 0;
+ struct b2r2_blt_request *request =
+ kzalloc(sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ /* Initialize the structure */
+ INIT_LIST_HEAD(&request->list);
+
+ /*
+ * If the user specified a color look-up table,
+ * make a copy that the HW can use.
+ */
+ if ((user_req->flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
+ request->clut = dma_alloc_coherent(
+ b2r2_blt->dev,
+ CLUT_SIZE,
+ &(request->clut_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (request->clut == NULL) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s CLUT allocation "
+ "failed.\n", __func__);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (us_req) {
+ if (copy_from_user(request->clut,
+ user_req->clut, CLUT_SIZE)) {
+ b2r2_log_err(b2r2_blt->dev, "%s: CLUT "
+ "copy_from_user failed\n",
+ __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+ } else {
+ memcpy(request->clut, user_req->clut,
+ CLUT_SIZE);
+ }
+ }
+
+ request->profile = is_profiler_registered_approx();
+
+ *request_out = request;
+exit:
+ if (ret != 0)
+ b2r2_free_request(request);
+
+ return ret;
+}
+
+/**
+ * Do the blit job split on available cores.
+ */
+static int b2r2_blt_blit_internal(int handle,
+ struct b2r2_blt_req *user_req,
+ bool us_req)
+{
+ int request_id;
+ int i;
+ int n_instance = 0;
+ int n_blit = 0;
+ int ret = 0;
+ struct b2r2_blt_data *blt_data;
+ struct b2r2_blt_req ureq;
+
+ /* The requests and the designated workers */
+ struct b2r2_blt_request *split_requests[B2R2_MAX_NBR_DEVICES];
+ struct b2r2_control_instance *ctl[B2R2_MAX_NBR_DEVICES];
+
+ blt_data = get_data(handle);
+ if (blt_data == NULL) {
+ b2r2_log_warn(b2r2_blt->dev,
+ "%s, blitter instance not found (handle=%d)\n",
+ __func__, handle);
+ return -ENOSYS;
+ }
+
+ /* Get the b2r2 core controls for the job */
+ get_control_instances(blt_data, ctl, B2R2_MAX_NBR_DEVICES, &n_instance);
+ if (n_instance == 0) {
+ b2r2_log_err(b2r2_blt->dev, "%s: No b2r2 cores available.\n",
+ __func__);
+ return -ENOSYS;
+ }
+
+ /* Get the user data */
+ if (us_req) {
+ if (copy_from_user(&ureq, user_req, sizeof(ureq))) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s: copy_from_user failed\n",
+ __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+ } else {
+ memcpy(&ureq, user_req, sizeof(ureq));
+ }
+
+ /*
+ * B2R2 cannot handle destination clipping on buffers
+ * allocated close to 64MiB bank boundaries.
+ * Recalculate src_rect and dst_rect to avoid clipping.
+ *
+ * This is also needed to ensure that the request split
+ * operates on visible areas.
+ */
+ b2r2_recalculate_rects(b2r2_blt->dev, &ureq);
+
+ if (!b2r2_validate_user_req(b2r2_blt->dev, &ureq)) {
+ b2r2_log_warn(b2r2_blt->dev,
+ "%s: b2r2_validate_user_req failed.\n",
+ __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Don't split small requests */
+ n_blit = limit_blits(n_instance, &ureq);
+
+ /* The id needs to be universal on
+ * all cores */
+ request_id = get_next_job_id();
+
+#ifdef CONFIG_B2R2_GENERIC_ONLY
+ /* Limit the generic only solution to one core (for now) */
+ n_blit = 1;
+#endif
+
+ for (i = 0; i < n_blit; i++) {
+ ret = b2r2_alloc_request(&ureq, us_req, &split_requests[i]);
+ if (ret < 0 || !split_requests[i]) {
+ b2r2_log_err(b2r2_blt->dev, "%s: Failed to alloc mem\n",
+ __func__);
+ ret = -ENOMEM;
+ break;
+ }
+ split_requests[i]->instance = ctl[i];
+ split_requests[i]->job.job_id = request_id;
+ split_requests[i]->job.data = (int) ctl[i]->control->data;
+ }
+
+ /* Split the request */
+ if (ret >= 0)
+ ret = b2r2_blt_split_request(blt_data, &ureq,
+ &split_requests[0], &ctl[0], &n_blit);
+
+ /* If anything failed, clean up allocated memory */
+ if (ret < 0) {
+ for (i = 0; i < n_blit; i++)
+ b2r2_free_request(split_requests[i]);
+ b2r2_log_err(b2r2_blt->dev,
+ "%s: b2r2_blt_split_request failed.\n",
+ __func__);
+ goto exit;
+ }
+
+#ifdef CONFIG_B2R2_GENERIC_ONLY
+ if (ureq.flags & B2R2_BLT_FLAG_BG_BLEND) {
+ /* No support for BG BLEND in generic
+ * implementation yet */
+ b2r2_log_warn(b2r2_blt->dev, "%s: Unsupported: "
+ "Background blend in b2r2_generic_blt\n",
+ __func__);
+ ret = -ENOSYS;
+ b2r2_free_request(split_requests[0]);
+ goto exit;
+ }
+ /* Use the generic path for all operations */
+ ret = b2r2_generic_blt(split_requests[0]);
+#else
+ /* Call each blitter control */
+ for (i = 0; i < n_blit; i++) {
+ ret = b2r2_control_blt(split_requests[i]);
+ if (ret < 0) {
+ b2r2_log_warn(b2r2_blt->dev,
+ "%s: b2r2_control_blt failed.\n", __func__);
+ break;
+ }
+ }
+ if (ret != -ENOSYS) {
+ int j;
+ /* TODO: if one blitter fails then cancel the jobs added */
+
+ /* Call waitjob for successful jobs
+ * (synchs if specified in request) */
+ if (ureq.flags & B2R2_BLT_FLAG_DRY_RUN)
+ goto exit;
+
+ for (j = 0; j < i; j++) {
+ int rtmp;
+
+ rtmp = b2r2_control_waitjob(split_requests[j]);
+ if (rtmp < 0) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s: b2r2_control_waitjob failed.\n",
+ __func__);
+ }
+
+ /* Save just the one error */
+ ret = (ret >= 0) ? rtmp : ret;
+ }
+ }
+#endif
+#ifdef CONFIG_B2R2_GENERIC_FALLBACK
+ if (ret == -ENOSYS) {
+ struct b2r2_blt_request *request_gen = NULL;
+ if (ureq.flags & B2R2_BLT_FLAG_BG_BLEND) {
+ /* No support for BG BLEND in generic
+ * implementation yet */
+ b2r2_log_warn(b2r2_blt->dev, "%s: Unsupported: "
+ "Background blend in b2r2_generic_blt\n",
+ __func__);
+ goto exit;
+ }
+
+ b2r2_log_info(b2r2_blt->dev,
+ "b2r2_blt=%d Going generic.\n", ret);
+ ret = b2r2_alloc_request(&ureq, us_req, &request_gen);
+ if (ret < 0 || !request_gen) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s: Failed to alloc mem for "
+ "request_gen\n", __func__);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* Initialize the structure */
+ request_gen->instance = ctl[0];
+ memcpy(&request_gen->user_req, &ureq,
+ sizeof(request_gen->user_req));
+ request_gen->core_mask = 1;
+ request_gen->job.job_id = request_id;
+ request_gen->job.data = (int) ctl[0]->control->data;
+
+ ret = b2r2_generic_blt(request_gen);
+ b2r2_log_info(b2r2_blt->dev, "\nb2r2_generic_blt=%d "
+ "Generic done.\n", ret);
+ }
+#endif
+exit:
+ release_control_instances(ctl, n_instance);
+
+ ret = ret >= 0 ? request_id : ret;
+
+ return ret;
+}
+
+/**
+ * Free the memory used for the b2r2_blt device
+ */
+static void b2r2_blt_release(struct kref *ref)
+{
+ BUG_ON(b2r2_blt == NULL);
+ if (b2r2_blt == NULL)
+ return;
+ kfree(b2r2_blt->datas);
+ kfree(b2r2_blt);
+ b2r2_blt = NULL;
+}
+
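+/**
+ * b2r2_blt_open - Opens a blitter instance from kernel space
+ *
+ * Opens one control instance per available b2r2 core. Returns a
+ * handle to use in subsequent b2r2_blt_request(), b2r2_blt_synch()
+ * and b2r2_blt_close() calls, or a negative error code.
+ */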
+int b2r2_blt_open(void)
+{
+ int ret = 0;
+ struct b2r2_blt_data *blt_data = NULL;
+ int i;
+
+ if (!atomic_inc_not_zero(&blt_refcount.refcount))
+ return -ENOSYS;
+
+ /* Allocate blitter instance data structure */
+ blt_data = kzalloc(sizeof(*blt_data), GFP_KERNEL);
+ if (!blt_data) {
+ b2r2_log_err(b2r2_blt->dev, "%s: Failed to alloc\n", __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) {
+ struct b2r2_control *control = b2r2_blt_get_control(i);
+ if (control != NULL) {
+ struct b2r2_control_instance *ci;
+
+ /* Allocate and initialize the control instance */
+ ci = kzalloc(sizeof(*ci), GFP_KERNEL);
+ if (!ci) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s: Failed to alloc\n",
+ __func__);
+ ret = -ENOMEM;
+ b2r2_blt_release_control(i);
+ goto err;
+ }
+ ci->control_id = i;
+ ci->control = control;
+ ret = b2r2_control_open(ci);
+ if (ret < 0) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s: Failed to open b2r2 control %d\n",
+ __func__, i);
+ kfree(ci);
+ b2r2_blt_release_control(i);
+ goto err;
+ }
+ blt_data->ctl_instance[i] = ci;
+ b2r2_blt_release_control(i);
+ } else {
+ blt_data->ctl_instance[i] = NULL;
+ }
+ }
+
+ /* TODO: Create kernel worker kthread */
+
+ ret = alloc_handle(blt_data);
+ if (ret < 0)
+ goto err;
+
+ kref_put(&blt_refcount, b2r2_blt_release);
+
+ return ret;
+
+err:
+ /* Destroy the blitter instance data structure */
+ if (blt_data) {
+ for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++)
+ kfree(blt_data->ctl_instance[i]);
+ kfree(blt_data);
+ }
+
+ kref_put(&blt_refcount, b2r2_blt_release);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_blt_open);
+
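+/**
+ * b2r2_blt_close - Releases a blitter instance
+ *
+ * @handle: Handle returned by b2r2_blt_open()
+ *
+ * Releases the control instances held by the handle and frees the
+ * instance data.
+ */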
+int b2r2_blt_close(int handle)
+{
+ int i;
+ struct b2r2_blt_data *blt_data;
+ int ret = 0;
+
+ if (!atomic_inc_not_zero(&blt_refcount.refcount))
+ return -ENOSYS;
+
+ b2r2_log_info(b2r2_blt->dev, "%s\n", __func__);
+
+ blt_data = get_data(handle);
+ if (blt_data == NULL) {
+ b2r2_log_warn(b2r2_blt->dev,
+ "%s, blitter data not found (handle=%d)\n",
+ __func__, handle);
+ ret = -ENOSYS;
+ goto exit;
+ }
+ free_handle(handle);
+
+ for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) {
+ struct b2r2_control_instance *ci =
+ blt_data->ctl_instance[i];
+ if (ci != NULL) {
+ struct b2r2_control *cont =
+ b2r2_blt_get_control(ci->control_id);
+ if (cont) {
+ /* Release the instance */
+ b2r2_control_release(ci);
+ b2r2_blt_release_control(ci->control_id);
+ }
+ kfree(ci);
+ }
+ }
+ kfree(blt_data);
+
+exit:
+ kref_put(&blt_refcount, b2r2_blt_release);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_blt_close);
+
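+/**
+ * b2r2_blt_request - Issues a blit request from kernel space
+ *
+ * @handle: Handle returned by b2r2_blt_open()
+ * @user_req: The request to perform; must point to kernel memory
+ *
+ * Returns a job id (> 0) on success, otherwise a negative error code.
+ * Report callbacks are not supported through this kernel API.
+ *
+ * Minimal usage sketch (illustrative only; a real request must also
+ * fill in src_img/src_rect/dst_img/dst_rect and any flags):
+ *
+ *	int handle = b2r2_blt_open();
+ *	struct b2r2_blt_req req = {0};
+ *	int job = b2r2_blt_request(handle, &req);
+ *
+ *	if (job > 0)
+ *		b2r2_blt_synch(handle, job);
+ *	b2r2_blt_close(handle);
+ */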
+int b2r2_blt_request(int handle,
+ struct b2r2_blt_req *user_req)
+{
+ int ret = 0;
+
+ if (!atomic_inc_not_zero(&blt_refcount.refcount))
+ return -ENOSYS;
+
+ /* Exclude some currently unsupported cases */
+ if ((user_req->flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) ||
+ (user_req->flags & B2R2_BLT_FLAG_REPORT_PERFORMANCE) ||
+ (user_req->report1 != 0)) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s No callback support in the kernel API\n",
+ __func__);
+ ret = -ENOSYS;
+ goto exit;
+ }
+
+ ret = b2r2_blt_blit_internal(handle, user_req, false);
+
+exit:
+ kref_put(&blt_refcount, b2r2_blt_release);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_blt_request);
+
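+/**
+ * b2r2_blt_synch - Waits for a blit request to finish
+ *
+ * @handle: Handle returned by b2r2_blt_open()
+ * @request_id: Job id returned by b2r2_blt_request(), or 0 to wait
+ * for all outstanding requests on this handle
+ */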
+int b2r2_blt_synch(int handle, int request_id)
+{
+ int ret = 0;
+ int i;
+ int n_synch = 0;
+ struct b2r2_control_instance *ctl[B2R2_MAX_NBR_DEVICES];
+ struct b2r2_blt_data *blt_data;
+
+ if (!atomic_inc_not_zero(&blt_refcount.refcount))
+ return -ENOSYS;
+
+ b2r2_log_info(b2r2_blt->dev, "%s\n", __func__);
+
+ blt_data = get_data(handle);
+ if (blt_data == NULL) {
+ b2r2_log_warn(b2r2_blt->dev,
+ "%s, blitter data not found (handle=%d)\n",
+ __func__, handle);
+ ret = -ENOSYS;
+ goto exit;
+ }
+
+ /* Get the b2r2 core controls for the job */
+ get_control_instances(blt_data, ctl, B2R2_MAX_NBR_DEVICES, &n_synch);
+ if (n_synch == 0) {
+ b2r2_log_err(b2r2_blt->dev, "%s: No b2r2 cores available.\n",
+ __func__);
+ ret = -ENOSYS;
+ goto exit;
+ }
+
+ for (i = 0; i < n_synch; i++) {
+ ret = b2r2_control_synch(ctl[i], request_id);
+ if (ret != 0) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s: b2r2_control_synch failed.\n",
+ __func__);
+ break;
+ }
+ }
+
+ /* Release the handles to the core controls */
+ release_control_instances(ctl, n_synch);
+
+exit:
+ kref_put(&blt_refcount, b2r2_blt_release);
+
+ b2r2_log_info(b2r2_blt->dev,
+ "%s, request_id=%d, returns %d\n", __func__, request_id, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_blt_synch);
+
+/**
+ * The user space API
+ */
+
+/**
+ * b2r2_blt_open_us - Implements file open on the b2r2_blt device
+ *
+ * @inode: File system inode
+ * @filp: File pointer
+ *
+ * A b2r2_blt_data handle is created and stored in the file structure.
+ */
+static int b2r2_blt_open_us(struct inode *inode, struct file *filp)
+{
+ int ret = 0;
+ int handle;
+
+ handle = b2r2_blt_open();
+ if (handle < 0) {
+ b2r2_log_err(b2r2_blt->dev, "%s: Failed to open handle\n",
+ __func__);
+ ret = handle;
+ goto exit;
+ }
+ filp->private_data = (void *) handle;
+exit:
+ return ret;
+}
+
+/**
+ * b2r2_blt_release_us - Implements last close on an instance of
+ * the b2r2_blt device
+ *
+ * @inode: File system inode
+ * @filp: File pointer
+ *
+ * All active jobs are finished or cancelled and allocated data
+ * is released.
+ */
+static int b2r2_blt_release_us(struct inode *inode, struct file *filp)
+{
+ int ret;
+ ret = b2r2_blt_close((int) filp->private_data);
+ return ret;
+}
+
+/**
+ * Query B2R2 capabilities
+ *
+ * @blt_data: The B2R2 BLT instance
+ * @query_cap: The structure receiving the capabilities
+ */
+static int b2r2_blt_query_cap(struct b2r2_blt_data *blt_data,
+ struct b2r2_blt_query_cap *query_cap)
+{
+ /* FIXME: Not implemented yet */
+ return -ENOSYS;
+}
+
+/**
+ * b2r2_blt_ioctl_us - This routine implements b2r2_blt ioctl interface
+ *
+ * @file: file pointer.
+ * @cmd :ioctl command.
+ * @arg: input argument for ioctl.
+ *
+ * Returns 0 if OK else negative error code
+ */
+static long b2r2_blt_ioctl_us(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ int handle = (int) file->private_data;
+
+ /* Process the actual ioctl */
+ b2r2_log_info(b2r2_blt->dev, "%s\n", __func__);
+
+ /* Get the instance from the file structure */
+ switch (cmd) {
+ case B2R2_BLT_IOC: {
+ /* arg is user pointer to struct b2r2_blt_request */
+ ret = b2r2_blt_blit_internal(handle,
+ (struct b2r2_blt_req *) arg, true);
+ break;
+ }
+
+ case B2R2_BLT_SYNCH_IOC:
+ /* arg is request_id */
+ ret = b2r2_blt_synch(handle, (int) arg);
+ break;
+
+ case B2R2_BLT_QUERY_CAP_IOC: {
+ /* Arg is struct b2r2_blt_query_cap */
+ struct b2r2_blt_query_cap query_cap;
+ struct b2r2_blt_data *blt_data = get_data(handle);
+
+ /* Get the user data */
+ if (copy_from_user(&query_cap, (void *)arg,
+ sizeof(query_cap))) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s: copy_from_user failed\n",
+ __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ /* Fill in our capabilities */
+ ret = b2r2_blt_query_cap(blt_data, &query_cap);
+
+ /* Return data to user */
+ if (copy_to_user((void *)arg, &query_cap,
+ sizeof(query_cap))) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s: copy_to_user failed\n",
+ __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+ break;
+ }
+
+ default:
+ /* Unknown command */
+ b2r2_log_err(b2r2_blt->dev, "%s: Unknown cmd %d\n",
+ __func__, cmd);
+ ret = -EINVAL;
+ break;
+
+ }
+
+exit:
+ if (ret < 0)
+ b2r2_log_err(b2r2_blt->dev, "%s: Return with error %d!\n",
+ __func__, -ret);
+
+ return ret;
+}
+
+/**
+ * b2r2_blt_poll_us - Support for user-space poll, select & epoll.
+ * Used for the user-space callback
+ *
+ * @filp: File to poll on
+ * @wait: Poll table to wait on
+ *
+ * This function checks if there is anything to read
+ */
+static unsigned b2r2_blt_poll_us(struct file *filp, poll_table *wait)
+{
+ struct b2r2_blt_data *blt_data =
+ (struct b2r2_blt_data *) filp->private_data;
+ struct b2r2_control_instance *ctl[B2R2_MAX_NBR_DEVICES];
+ unsigned int ret = POLLIN | POLLRDNORM;
+ int n_poll = 0;
+ int i;
+
+ b2r2_log_info(b2r2_blt->dev, "%s\n", __func__);
+
+ /* Get the b2r2 core controls for the job */
+ get_control_instances(blt_data, ctl, B2R2_MAX_NBR_DEVICES, &n_poll);
+ if (n_poll == 0) {
+ b2r2_log_err(b2r2_blt->dev, "%s: No b2r2 cores available.\n",
+ __func__);
+ ret = POLLERR;
+ goto exit;
+ }
+
+ /* Poll each core control instance */
+ for (i = 0; i < n_poll && ret != 0; i++) {
+ poll_wait(filp, &ctl[i]->report_list_waitq, wait);
+ mutex_lock(&ctl[i]->lock);
+ if (list_empty(&ctl[i]->report_list))
+ ret = 0; /* No reports */
+ mutex_unlock(&ctl[i]->lock);
+ }
+
+ /* Release the handles to the core controls */
+ release_control_instances(ctl, n_poll);
+
+exit:
+ b2r2_log_info(b2r2_blt->dev, "%s: returns %d, n_poll: %d\n",
+ __func__, ret, n_poll);
+
+ return ret;
+}
+
+/**
+ * b2r2_blt_read_us - Read report data, used for the user-space callback
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static ssize_t b2r2_blt_read_us(struct file *filp,
+ char __user *buf, size_t count, loff_t *f_pos)
+{
+ int ret = 0;
+ int n_read = 0;
+ int i;
+ int first_index = 0;
+ struct b2r2_blt_report report;
+ struct b2r2_blt_request *requests[B2R2_MAX_NBR_DEVICES];
+ struct b2r2_blt_data *blt_data =
+ (struct b2r2_blt_data *) filp->private_data;
+ struct b2r2_control_instance *ctl[B2R2_MAX_NBR_DEVICES];
+ struct b2r2_control_instance *first = NULL;
+ bool block = ((filp->f_flags & O_NONBLOCK) == 0);
+ u32 core_mask = 0;
+
+ b2r2_log_info(b2r2_blt->dev, "%s\n", __func__);
+
+ /*
+ * We return only complete report records, one at a time.
+ * Might be more efficient to support read of many.
+ */
+ count = (count / sizeof(struct b2r2_blt_report)) *
+ sizeof(struct b2r2_blt_report);
+ if (count > sizeof(struct b2r2_blt_report))
+ count = sizeof(struct b2r2_blt_report);
+ if (count == 0)
+ return count;
+
+ memset(ctl, 0, sizeof(*ctl) * B2R2_MAX_NBR_DEVICES);
+ /* Get the b2r2 core controls for the job */
+ for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) {
+ struct b2r2_control_instance *ci = blt_data->ctl_instance[i];
+ if (ci) {
+ struct b2r2_control *cont =
+ b2r2_blt_get_control(ci->control_id);
+ if (cont) {
+ ctl[i] = ci;
+ n_read++;
+ }
+ }
+ }
+ if (n_read == 0) {
+ b2r2_log_err(b2r2_blt->dev, "%s: No b2r2 cores available.\n",
+ __func__);
+ return -ENOSYS;
+ }
+
+ /* Find which control to ask for a report first */
+ for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) {
+ if (ctl[i] != NULL) {
+ first = ctl[i];
+ first_index = i;
+ break;
+ }
+ }
+ if (!first) {
+ b2r2_log_err(b2r2_blt->dev, "%s: Internal error.\n",
+ __func__);
+ return -ENOSYS;
+ }
+
+ memset(requests, 0, sizeof(*requests) * B2R2_MAX_NBR_DEVICES);
+ /* Read a report from the first available core */
+ ret = b2r2_control_read(first, &requests[first_index], block);
+ if (ret <= 0 || requests[first_index] == NULL) {
+ b2r2_log_err(b2r2_blt->dev, "%s: b2r2_control_read failed.\n",
+ __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+ core_mask = requests[first_index]->core_mask >> 1;
+ core_mask &= ~(1 << first_index);
+
+ /* If there are any more cores, try reading the report
+ * with the specific ID from the other cores */
+ for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) {
+ if ((core_mask & 1) && (ctl[i] != NULL)) {
+ /* TODO: Do we need to wait here? */
+ ret = b2r2_control_read_id(ctl[i], &requests[i], block,
+ requests[first_index]->request_id);
+ if (ret <= 0 || requests[i] == NULL) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s: b2r2_control_read failed.\n",
+ __func__);
+ break;
+ }
+ }
+ core_mask = core_mask >> 1;
+ }
+
+ if (ret > 0) {
+ /* Construct a report and copy to userland */
+ report.request_id = requests[first_index]->request_id;
+ report.report1 = requests[first_index]->user_req.report1;
+ report.report2 = requests[first_index]->user_req.report2;
+ report.usec_elapsed = 0; /* TBD */
+
+ if (copy_to_user(buf, &report, sizeof(report))) {
+ b2r2_log_err(b2r2_blt->dev,
+ "%s: copy_to_user failed.\n",
+ __func__);
+ ret = -EFAULT;
+ }
+ }
+
+ if (ret > 0) {
+ for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++)
+ /*
+ * Release matching the addref when the job was put
+ * into the report list
+ */
+ if (requests[i] != NULL)
+ b2r2_core_job_release(&requests[i]->job,
+ __func__);
+ } else {
+ /* We failed at one core or copy to user failed */
+ for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++)
+ if (requests[i] != NULL)
+ list_add(&requests[i]->list,
+ &ctl[i]->report_list);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ /* Release the handles to the core controls */
+ release_control_instances(ctl, n_read);
+
+ return ret;
+}
+
+/**
+ * b2r2_blt_fops - File operations for b2r2_blt
+ */
+static const struct file_operations b2r2_blt_fops = {
+ .owner = THIS_MODULE,
+ .open = b2r2_blt_open_us,
+ .release = b2r2_blt_release_us,
+ .unlocked_ioctl = b2r2_blt_ioctl_us,
+ .poll = b2r2_blt_poll_us,
+ .read = b2r2_blt_read_us,
+};
+
+
+/**
+ * b2r2_blt_probe() - This routine loads the B2R2 blitter driver
+ *
+ * @pdev: platform device.
+ */
+static int b2r2_blt_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i;
+
+ BUG_ON(pdev == NULL);
+
+ dev_info(&pdev->dev, "%s start.\n", __func__);
+
+ if (!b2r2_blt) {
+ b2r2_blt = kzalloc(sizeof(*b2r2_blt), GFP_KERNEL);
+ if (!b2r2_blt) {
+ dev_err(&pdev->dev, "b2r2_blt alloc failed\n");
+ ret = -ENOMEM;
+ goto error_exit;
+ }
+
+ /* Init b2r2 core control reference counters */
+ for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++)
+ spin_lock_init(&b2r2_controls[i].lock);
+ }
+
+ mutex_init(&b2r2_blt->datas_lock);
+ spin_lock_init(&b2r2_blt->lock);
+ b2r2_blt->dev = &pdev->dev;
+
+ /* Register b2r2 driver */
+ b2r2_blt->miscdev.parent = b2r2_blt->dev;
+ b2r2_blt->miscdev.minor = MISC_DYNAMIC_MINOR;
+ b2r2_blt->miscdev.name = "b2r2_blt";
+ b2r2_blt->miscdev.fops = &b2r2_blt_fops;
+
+ ret = misc_register(&b2r2_blt->miscdev);
+ if (ret != 0) {
+ printk(KERN_WARNING "%s: registering misc device fails\n",
+ __func__);
+ goto error_exit;
+ }
+
+ b2r2_blt->dev = b2r2_blt->miscdev.this_device;
+ b2r2_blt->dev->coherent_dma_mask = 0xFFFFFFFF;
+
+ kref_init(&blt_refcount);
+
+ dev_info(&pdev->dev, "%s done.\n", __func__);
+
+ return ret;
+
+/* Recover from any error if something fails */
+error_exit:
+
+ kfree(b2r2_blt);
+
+ dev_info(&pdev->dev, "%s done with errors (%d).\n", __func__, ret);
+
+ return ret;
+}
+
+/**
+ * b2r2_blt_remove - This routine unloads b2r2_blt driver
+ *
+ * @pdev: platform device.
+ */
+static int b2r2_blt_remove(struct platform_device *pdev)
+{
+ BUG_ON(pdev == NULL);
+ dev_info(&pdev->dev, "%s started.\n", __func__);
+ misc_deregister(&b2r2_blt->miscdev);
+ kref_put(&blt_refcount, b2r2_blt_release);
+ return 0;
+}
+
+/**
+ * b2r2_blt_suspend() - This routine puts the B2R2 blitter into suspend state.
+ * @pdev: platform device.
+ *
+ * This routine stores the current state of the b2r2 device and puts it
+ * into suspend state.
+ *
+ */
+int b2r2_blt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+/**
+ * b2r2_blt_resume() - This routine resumes the B2R2 blitter from suspend state.
+ * @pdev: platform device.
+ *
+ * This routine restores the state of the b2r2 device on resume.
+ *
+ */
+int b2r2_blt_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+/**
+ * platform_b2r2_blt_driver - Platform driver configuration for the
+ * B2R2 blitter driver
+ */
+static struct platform_driver platform_b2r2_blt_driver = {
+ .remove = b2r2_blt_remove,
+ .driver = {
+ .name = "b2r2_blt",
+ },
+ .suspend = b2r2_blt_suspend,
+ .resume = b2r2_blt_resume,
+};
+
+/**
+ * b2r2_blt_init() - Module init function for the B2R2 blitter module
+ */
+static int __init b2r2_blt_init(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ return platform_driver_probe(&platform_b2r2_blt_driver, b2r2_blt_probe);
+}
+module_init(b2r2_blt_init);
+
+/**
+ * b2r2_blt_exit() - Module exit function for the B2R2 blitter module
+ */
+static void __exit b2r2_blt_exit(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ platform_driver_unregister(&platform_b2r2_blt_driver);
+ return;
+}
+module_exit(b2r2_blt_exit);
+
+MODULE_AUTHOR("Robert Fekete <robert.fekete@stericsson.com>");
+MODULE_DESCRIPTION("ST-Ericsson B2R2 Blitter module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/b2r2/b2r2_blt_main.c b/drivers/video/b2r2/b2r2_blt_main.c
new file mode 100644
index 00000000000..3727f742bf1
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_blt_main.c
@@ -0,0 +1,2840 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 Blitter module
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
+#include <linux/fb.h>
+#include <linux/uaccess.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+#include <asm/cacheflush.h>
+#include <linux/smp.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/hwmem.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_control.h"
+#include "b2r2_node_split.h"
+#include "b2r2_generic.h"
+#include "b2r2_mem_alloc.h"
+#include "b2r2_profiler_socket.h"
+#include "b2r2_timing.h"
+#include "b2r2_debug.h"
+#include "b2r2_utils.h"
+#include "b2r2_input_validation.h"
+#include "b2r2_core.h"
+#include "b2r2_filters.h"
+
+#define B2R2_HEAP_SIZE (4 * PAGE_SIZE)
+#define MAX_TMP_BUF_SIZE (128 * PAGE_SIZE)
+
+/*
+ * TODO:
+ * Implementation of query cap
+ * Support for user space virtual pointer to physically consecutive memory
+ * Support for user space virtual pointer to physically scattered memory
+ * Callback reads lagging behind in blt_api_stress app
+ * Store smaller items in the report list instead of the whole request
+ * Support read of many report records at once.
+ */
+
+/* Local functions */
+static void inc_stat(struct b2r2_control *cont, unsigned long *stat);
+static void dec_stat(struct b2r2_control *cont, unsigned long *stat);
+
+#ifndef CONFIG_B2R2_GENERIC_ONLY
+static void job_callback(struct b2r2_core_job *job);
+static void job_release(struct b2r2_core_job *job);
+static int job_acquire_resources(struct b2r2_core_job *job, bool atomic);
+static void job_release_resources(struct b2r2_core_job *job, bool atomic);
+#endif
+
+#ifdef CONFIG_B2R2_GENERIC
+static void job_callback_gen(struct b2r2_core_job *job);
+static void job_release_gen(struct b2r2_core_job *job);
+static int job_acquire_resources_gen(struct b2r2_core_job *job, bool atomic);
+static void job_release_resources_gen(struct b2r2_core_job *job, bool atomic);
+static void tile_job_callback_gen(struct b2r2_core_job *job);
+static void tile_job_release_gen(struct b2r2_core_job *job);
+#endif
+
+
+static int resolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst, struct b2r2_resolved_buf *resolved);
+static void unresolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_buf *buf, struct b2r2_resolved_buf *resolved);
+static void sync_buf(struct b2r2_control *cont, struct b2r2_blt_img *img,
+ struct b2r2_resolved_buf *resolved, bool is_dst,
+ struct b2r2_blt_rect *rect);
+static bool is_report_list_empty(struct b2r2_control_instance *instance);
+static bool is_synching(struct b2r2_control_instance *instance);
+static void get_actual_dst_rect(struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *actual_dst_rect);
+static void set_up_hwmem_region(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect,
+ struct hwmem_region *region);
+static int resolve_hwmem(struct b2r2_control *cont, struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used, bool is_dst,
+ struct b2r2_resolved_buf *resolved_buf);
+static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf);
+
+/**
+ * struct sync_args - Data for clean/flush
+ *
+ * @start: Virtual start address
+ * @end: Virtual end address
+ */
+struct sync_args {
+ unsigned long start;
+ unsigned long end;
+};
+/**
+ * flush_l1_cache_range_curr_cpu() - Cleans and invalidates L1 cache on the
+ * current CPU
+ *
+ * @arg: Pointer to sync_args structure
+ */
+static inline void flush_l1_cache_range_curr_cpu(void *arg)
+{
+ struct sync_args *sa = (struct sync_args *)arg;
+
+ dmac_flush_range((void *)sa->start, (void *)sa->end);
+}
+
+#ifdef CONFIG_SMP
+/**
+ * flush_l1_cache_range_all_cpus() - Cleans and invalidates L1 cache on all CPU:s
+ *
+ * @sa: Pointer to sync_args structure
+ */
+static void flush_l1_cache_range_all_cpus(struct sync_args *sa)
+{
+ on_each_cpu(flush_l1_cache_range_curr_cpu, sa, 1);
+}
+#endif
+
+/**
+ * clean_l1_cache_range_curr_cpu() - Cleans L1 cache on current CPU
+ *
+ * Ensures that data is written out from the CPU:s L1 cache,
+ * it will still be in the cache.
+ *
+ * @arg: Pointer to sync_args structure
+ */
+static inline void clean_l1_cache_range_curr_cpu(void *arg)
+{
+ struct sync_args *sa = (struct sync_args *)arg;
+
+ dmac_map_area((void *)sa->start,
+ (void *)sa->end - (void *)sa->start,
+ DMA_TO_DEVICE);
+}
+
+#ifdef CONFIG_SMP
+/**
+ * clean_l1_cache_range_all_cpus() - Cleans L1 cache on all CPU:s
+ *
+ * Ensures that data is written out from all CPU:s L1 cache,
+ * it will still be in the cache.
+ *
+ * @sa: Pointer to sync_args structure
+ */
+static void clean_l1_cache_range_all_cpus(struct sync_args *sa)
+{
+ on_each_cpu(clean_l1_cache_range_curr_cpu, sa, 1);
+}
+#endif
+
+/**
+ * b2r2_control_open - Initializes a b2r2 control instance
+ *
+ * @instance: The control instance to initialize
+ *
+ * Sets up the report list, lock and wait queues of the instance.
+ */
+int b2r2_control_open(struct b2r2_control_instance *instance)
+{
+ int ret = 0;
+ struct b2r2_control *cont = instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+ inc_stat(cont, &cont->stat_n_in_open);
+
+ INIT_LIST_HEAD(&instance->report_list);
+ mutex_init(&instance->lock);
+ init_waitqueue_head(&instance->report_list_waitq);
+ init_waitqueue_head(&instance->synch_done_waitq);
+ dec_stat(cont, &cont->stat_n_in_open);
+
+ return ret;
+}
+
+/**
+ * b2r2_control_release - Releases a b2r2 control instance
+ *
+ * @instance: The control instance to release
+ *
+ * All active jobs are finished or cancelled and allocated data
+ * is released.
+ */
+int b2r2_control_release(struct b2r2_control_instance *instance)
+{
+ int ret;
+ struct b2r2_control *cont = instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_release);
+
+ /* Finish all outstanding requests */
+ ret = b2r2_control_synch(instance, 0);
+ if (ret < 0)
+ b2r2_log_warn(cont->dev, "%s: b2r2_blt_sync failed with %d\n",
+ __func__, ret);
+
+ /* Now cancel any remaining outstanding request */
+ if (instance->no_of_active_requests) {
+ struct b2r2_core_job *job;
+
+ b2r2_log_warn(cont->dev, "%s: %d active requests\n", __func__,
+ instance->no_of_active_requests);
+
+ /* Find and cancel all jobs belonging to us */
+ job = b2r2_core_job_find_first_with_tag(cont,
+ (int) instance);
+ while (job) {
+ b2r2_core_job_cancel(job);
+ /* Matches addref in b2r2_core_job_find... */
+ b2r2_core_job_release(job, __func__);
+ job = b2r2_core_job_find_first_with_tag(cont,
+ (int) instance);
+ }
+
+ b2r2_log_warn(cont->dev, "%s: %d active requests after "
+ "cancel\n", __func__, instance->no_of_active_requests);
+ }
+
+ /* Release jobs in report list */
+ mutex_lock(&instance->lock);
+ while (!list_empty(&instance->report_list)) {
+ struct b2r2_blt_request *request = list_first_entry(
+ &instance->report_list,
+ struct b2r2_blt_request,
+ list);
+ list_del_init(&request->list);
+ mutex_unlock(&instance->lock);
+ /*
+ * This release matches the addref when the job was put into
+ * the report list
+ */
+ b2r2_core_job_release(&request->job, __func__);
+ mutex_lock(&instance->lock);
+ }
+ mutex_unlock(&instance->lock);
+
+ dec_stat(cont, &cont->stat_n_in_release);
+
+ return 0;
+}
+
+ssize_t b2r2_control_read(struct b2r2_control_instance *instance,
+ struct b2r2_blt_request **request_out, bool block)
+{
+ struct b2r2_blt_request *request = NULL;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /*
+ * Loop and wait here until we have anything to return or
+ * until interrupted
+ */
+ mutex_lock(&instance->lock);
+ while (list_empty(&instance->report_list)) {
+ mutex_unlock(&instance->lock);
+
+ /* Return if non blocking read */
+ if (!block)
+ return -EAGAIN;
+
+ b2r2_log_info(cont->dev, "%s - Going to sleep\n", __func__);
+ if (wait_event_interruptible(
+ instance->report_list_waitq,
+ !is_report_list_empty(instance)))
+ /* signal: tell the fs layer to handle it */
+ return -ERESTARTSYS;
+
+ /* Otherwise loop, but first reacquire the lock */
+ mutex_lock(&instance->lock);
+ }
+
+ if (!list_empty(&instance->report_list))
+ request = list_first_entry(
+ &instance->report_list, struct b2r2_blt_request, list);
+
+ if (request) {
+ /* Remove from list to avoid reading twice */
+ list_del_init(&request->list);
+
+ *request_out = request;
+ }
+ mutex_unlock(&instance->lock);
+
+ if (request)
+ return 1;
+
+ /* No report returned */
+ return 0;
+}
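+
+/*
+ * Illustrative reader sketch (hypothetical caller): a blocking reader
+ * drains the report list one request at a time. Each returned request
+ * still holds the reference taken when it was put on the report list,
+ * so it must be released after use:
+ *
+ *   struct b2r2_blt_request *req;
+ *
+ *   while (b2r2_control_read(instance, &req, true) == 1) {
+ *       ... inspect req (e.g. req->request_id) ...
+ *       b2r2_core_job_release(&req->job, __func__);
+ *   }
+ */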
+
+ssize_t b2r2_control_read_id(struct b2r2_control_instance *instance,
+ struct b2r2_blt_request **request_out, bool block,
+ int request_id)
+{
+ struct b2r2_blt_request *request = NULL;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /*
+ * Loop and wait here until we have anything to return or
+ * until interrupted
+ */
+ mutex_lock(&instance->lock);
+ while (list_empty(&instance->report_list)) {
+ mutex_unlock(&instance->lock);
+
+ /* Return if non blocking read */
+ if (!block)
+ return -EAGAIN;
+
+ b2r2_log_info(cont->dev, "%s - Going to sleep\n", __func__);
+ if (wait_event_interruptible(
+ instance->report_list_waitq,
+ !is_report_list_empty(instance)))
+ /* signal: tell the fs layer to handle it */
+ return -ERESTARTSYS;
+
+ /* Otherwise loop, but first reacquire the lock */
+ mutex_lock(&instance->lock);
+ }
+
+ if (!list_empty(&instance->report_list)) {
+ struct b2r2_blt_request *pos;
+ list_for_each_entry(pos, &instance->report_list, list) {
+ if (pos->request_id == request_id) {
+ request = pos;
+ break;
+ }
+ }
+ }
+
+ if (request) {
+ /* Remove from list to avoid reading twice */
+ list_del_init(&request->list);
+ *request_out = request;
+ }
+ mutex_unlock(&instance->lock);
+
+ if (request)
+ return 1;
+
+ /* No report returned */
+ return 0;
+}
+
+#ifndef CONFIG_B2R2_GENERIC_ONLY
+/**
+ * b2r2_control_blt - Implementation of the B2R2 blit request
+ *
+ * @request: The request to perform
+ */
+int b2r2_control_blt(struct b2r2_blt_request *request)
+{
+ int ret = 0;
+ struct b2r2_blt_rect actual_dst_rect;
+ int request_id = 0;
+ struct b2r2_node *last_node = request->first_node;
+ int node_count;
+ struct b2r2_control_instance *instance = request->instance;
+ struct b2r2_control *cont = instance->control;
+
+ u32 thread_runtime_at_start = 0;
+
+ if (request->profile) {
+ request->start_time_nsec = b2r2_get_curr_nsec();
+ thread_runtime_at_start = (u32)task_sched_runtime(current);
+ }
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt);
+
+ /* Debug prints of incoming request */
+ b2r2_log_info(cont->dev,
+ "src.fmt=%#010x src.buf={%d,%d,%d} "
+ "src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
+ request->user_req.src_img.fmt,
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ b2r2_log_info(cont->dev,
+ "bg.fmt=%#010x bg.buf={%d,%d,%d} "
+ "bg.w,h={%d,%d} bg.rect={%d,%d,%d,%d}\n",
+ request->user_req.bg_img.fmt,
+ request->user_req.bg_img.buf.type,
+ request->user_req.bg_img.buf.fd,
+ request->user_req.bg_img.buf.offset,
+ request->user_req.bg_img.width,
+ request->user_req.bg_img.height,
+ request->user_req.bg_rect.x,
+ request->user_req.bg_rect.y,
+ request->user_req.bg_rect.width,
+ request->user_req.bg_rect.height);
+
+ b2r2_log_info(cont->dev,
+ "dst.fmt=%#010x dst.buf={%d,%d,%d} "
+ "dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n",
+ request->user_req.dst_img.fmt,
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height);
+
+ inc_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Wait here if synch is ongoing */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ if (ret) {
+ b2r2_log_warn(cont->dev, "%s: Sync wait interrupted, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+ goto synch_interrupted;
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Resolve the buffers */
+
+ /* Source buffer */
+ ret = resolve_buf(cont, &request->user_req.src_img,
+ &request->user_req.src_rect,
+ false, &request->src_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve src buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_buf_failed;
+ }
+
+ /* Background buffer */
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) {
+ ret = resolve_buf(cont, &request->user_req.bg_img,
+ &request->user_req.bg_rect,
+ false, &request->bg_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve bg buf failed,"
+ " %d\n", __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_bg_buf_failed;
+ }
+ }
+
+ /* Source mask buffer */
+ ret = resolve_buf(cont, &request->user_req.src_mask,
+ &request->user_req.src_rect, false,
+ &request->src_mask_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve src mask buf failed,"
+ " %d\n", __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_mask_buf_failed;
+ }
+
+ /* Destination buffer */
+ get_actual_dst_rect(&request->user_req, &actual_dst_rect);
+ ret = resolve_buf(cont, &request->user_req.dst_img, &actual_dst_rect,
+ true, &request->dst_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve dst buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_dst_buf_failed;
+ }
+
+ /* Debug prints of resolved buffers */
+ b2r2_log_info(cont->dev, "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->src_resolved.physical_address,
+ request->src_resolved.virtual_address,
+ request->src_resolved.is_pmem,
+ request->src_resolved.filep,
+ request->src_resolved.file_physical_start,
+ request->src_resolved.file_virtual_start,
+ request->src_resolved.file_len);
+
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ b2r2_log_info(cont->dev, "bg.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->bg_resolved.physical_address,
+ request->bg_resolved.virtual_address,
+ request->bg_resolved.is_pmem,
+ request->bg_resolved.filep,
+ request->bg_resolved.file_physical_start,
+ request->bg_resolved.file_virtual_start,
+ request->bg_resolved.file_len);
+
+ b2r2_log_info(cont->dev, "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->dst_resolved.physical_address,
+ request->dst_resolved.virtual_address,
+ request->dst_resolved.is_pmem,
+ request->dst_resolved.filep,
+ request->dst_resolved.file_physical_start,
+ request->dst_resolved.file_virtual_start,
+ request->dst_resolved.file_len);
+
+ /* Calculate the number of nodes (and resources) needed for this job */
+ ret = b2r2_node_split_analyze(request, MAX_TMP_BUF_SIZE, &node_count,
+ &request->bufs, &request->buf_count,
+ &request->node_split_job);
+ if (ret == -ENOSYS) {
+ /* There was no optimized path for this request */
+ b2r2_log_info(cont->dev, "%s: No optimized path for request\n",
+ __func__);
+ goto no_optimized_path;
+
+ } else if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Failed to analyze request,"
+ " ret = %d\n", __func__, ret);
+#ifdef CONFIG_DEBUG_FS
+ {
+ /* Failed, dump job to dmesg */
+ char *buf = kmalloc(4096, GFP_KERNEL);
+
+ b2r2_log_info(cont->dev, "%s: Analyze failed for:\n",
+ __func__);
+ if (buf != NULL) {
+ sprintf_req(request, buf, 4096);
+ b2r2_log_info(cont->dev, "%s", buf);
+ kfree(buf);
+ } else {
+ b2r2_log_info(cont->dev, "Unable to print the"
+ " request. Message buffer"
+ " allocation failed.\n");
+ }
+ }
+#endif
+ goto generate_nodes_failed;
+ }
+
+ /* Allocate the nodes needed */
+#ifdef B2R2_USE_NODE_GEN
+ request->first_node = b2r2_blt_alloc_nodes(cont,
+ node_count);
+ if (request->first_node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: Failed to allocate nodes,"
+ " ret = %d\n", __func__, ret);
+ goto generate_nodes_failed;
+ }
+#else
+ ret = b2r2_node_alloc(cont, node_count, &(request->first_node));
+ if (ret < 0 || request->first_node == NULL) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#endif
+
+ /* Build the B2R2 node list */
+ ret = b2r2_node_split_configure(cont, &request->node_split_job,
+ request->first_node);
+
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s:"
+ " Failed to perform node split, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+
+ /* Exit here if dry run */
+ if (request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN)
+ goto exit_dry_run;
+
+ /* Configure the request */
+ last_node = request->first_node;
+ while (last_node && last_node->next)
+ last_node = last_node->next;
+
+ request->job.tag = (int) instance;
+ request->job.data = (int) cont->data;
+ request->job.prio = request->user_req.prio;
+ request->job.first_node_address =
+ request->first_node->physical_address;
+ request->job.last_node_address =
+ last_node->physical_address;
+ request->job.callback = job_callback;
+ request->job.release = job_release;
+ request->job.acquire_resources = job_acquire_resources;
+ request->job.release_resources = job_release_resources;
+
+ /* Synchronize memory occupied by the buffers */
+
+ /* Source buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH) &&
+ (request->user_req.src_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_img,
+ &request->src_resolved, false,
+ &request->user_req.src_rect);
+
+ /* Background buffer */
+ if ((request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) &&
+ !(request->user_req.flags &
+ B2R2_BLT_FLAG_BG_NO_CACHE_FLUSH) &&
+ (request->user_req.bg_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.bg_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.bg_img,
+ &request->bg_resolved, false,
+ &request->user_req.bg_rect);
+
+ /* Source mask buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH) &&
+ (request->user_req.src_mask.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_mask.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_mask,
+ &request->src_mask_resolved, false, NULL);
+
+ /* Destination buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH) &&
+ (request->user_req.dst_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.dst_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.dst_img,
+ &request->dst_resolved, true,
+ &request->user_req.dst_rect);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Remember latest request for debugfs */
+ cont->debugfs_latest_request = *request;
+#endif
+
+ /* Submit the job */
+ b2r2_log_info(cont->dev, "%s: Submitting job\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request->profile)
+ request->nsec_active_in_cpu =
+ (s32)((u32)task_sched_runtime(current) -
+ thread_runtime_at_start);
+
+ mutex_lock(&instance->lock);
+
+ /* Add the job to b2r2_core */
+ request_id = b2r2_core_job_add(cont, &request->job);
+ request->request_id = request_id;
+
+ dec_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn(cont->dev, "%s: Failed to add job, ret = %d\n",
+ __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+ goto job_add_failed;
+ }
+
+ inc_stat(cont, &cont->stat_n_jobs_added);
+
+ instance->no_of_active_requests++;
+ mutex_unlock(&instance->lock);
+
+ return ret >= 0 ? request_id : ret;
+
+job_add_failed:
+exit_dry_run:
+no_optimized_path:
+generate_nodes_failed:
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+resolve_dst_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+resolve_src_mask_buf_failed:
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ unresolve_buf(cont, &request->user_req.bg_img.buf,
+ &request->bg_resolved);
+resolve_bg_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+resolve_src_buf_failed:
+synch_interrupted:
+ if ((request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN) == 0 || ret)
+ b2r2_log_warn(cont->dev, "%s returns with error %d\n",
+ __func__, ret);
+ job_release(&request->job);
+ dec_stat(cont, &cont->stat_n_jobs_released);
+
+ dec_stat(cont, &cont->stat_n_in_blt);
+
+ return ret;
+}
+
+int b2r2_control_waitjob(struct b2r2_blt_request *request)
+{
+ int ret = 0;
+ struct b2r2_control_instance *instance = request->instance;
+ struct b2r2_control *cont = instance->control;
+
+ /* Wait for the job to be done if synchronous */
+ if ((request->user_req.flags & B2R2_BLT_FLAG_ASYNCH) == 0) {
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
+
+ ret = b2r2_core_job_wait(&request->job);
+
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(cont->dev, "%s: Failed to wait job,"
+ " ret = %d\n", __func__, ret);
+ else
+ b2r2_log_info(cont->dev, "%s: Synchronous wait done\n",
+ __func__);
+ }
+
+ /*
+ * Release matching the addref in b2r2_core_job_add,
+ * the request must not be accessed after this call
+ */
+ b2r2_core_job_release(&request->job, __func__);
+ dec_stat(cont, &cont->stat_n_in_blt);
+
+ return ret;
+}
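+
+/*
+ * Illustrative submit-and-wait sketch (hypothetical caller): for a
+ * synchronous blit the two calls are chained. b2r2_control_waitjob()
+ * drops the reference taken in b2r2_core_job_add(), so the request
+ * must not be touched after it returns:
+ *
+ *   int id = b2r2_control_blt(request);
+ *
+ *   if (id >= 0)
+ *       ret = b2r2_control_waitjob(request);
+ */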
+
+/**
+ * Called when job is done or cancelled
+ *
+ * @job: The job
+ */
+static void job_callback(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request = NULL;
+ struct b2r2_core *core = NULL;
+ struct b2r2_control *cont = NULL;
+
+ request = container_of(job, struct b2r2_blt_request, job);
+ core = (struct b2r2_core *) job->data;
+ cont = core->control;
+
+ if (cont->dev)
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+ /* Unresolve the buffers */
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ unresolve_buf(cont, &request->user_req.bg_img.buf,
+ &request->bg_resolved);
+
+ /* Move to report list if the job shall be reported */
+ /* FIXME: Use a smaller struct? */
+ /* TODO: In the case of kernel API call, feed an asynch task to the
+ * instance worker (kthread) instead of polling for a report */
+ mutex_lock(&request->instance->lock);
+ if (request->user_req.flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) {
+ /* Move job to report list */
+ list_add_tail(&request->list,
+ &request->instance->report_list);
+ inc_stat(cont, &cont->stat_n_jobs_in_report_list);
+
+ /* Wake up poll */
+ wake_up_interruptible(
+ &request->instance->report_list_waitq);
+
+ /* Add a reference because we put the job in the report list */
+ b2r2_core_job_addref(job, __func__);
+ }
+
+ /*
+ * Decrease number of active requests and wake up
+ * synching threads if active requests reaches zero
+ */
+ BUG_ON(request->instance->no_of_active_requests == 0);
+ request->instance->no_of_active_requests--;
+ if (request->instance->synching &&
+ request->instance->no_of_active_requests == 0) {
+ request->instance->synching = false;
+ /* Wake up all syncing */
+
+ wake_up_interruptible_all(
+ &request->instance->synch_done_waitq);
+ }
+ mutex_unlock(&request->instance->lock);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Dump job if cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED) {
+ char *buf = kmalloc(4096, GFP_KERNEL);
+
+ b2r2_log_info(cont->dev, "%s: Job cancelled:\n", __func__);
+ if (buf != NULL) {
+ sprintf_req(request, buf, 4096);
+ b2r2_log_info(cont->dev, "%s", buf);
+ kfree(buf);
+ } else {
+ b2r2_log_info(cont->dev, "Unable to print the request."
+ " Message buffer allocation failed.\n");
+ }
+ }
+#endif
+
+ if (request->profile) {
+ request->total_time_nsec =
+ (s32)(b2r2_get_curr_nsec() - request->start_time_nsec);
+ b2r2_call_profiler_blt_done(request);
+ }
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when job should be released (free memory etc.)
+ *
+ * @job: The job
+ */
+static void job_release(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request = NULL;
+ struct b2r2_core *core = NULL;
+ struct b2r2_control *cont = NULL;
+
+ request = container_of(job, struct b2r2_blt_request, job);
+ core = (struct b2r2_core *) job->data;
+ cont = core->control;
+
+ inc_stat(cont, &cont->stat_n_jobs_released);
+
+ b2r2_log_info(cont->dev, "%s, first_node=%p, ref_count=%d\n",
+ __func__, request->first_node, request->job.ref_count);
+
+ b2r2_node_split_cancel(cont, &request->node_split_job);
+
+ if (request->first_node) {
+ b2r2_debug_job_done(cont, request->first_node);
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(cont, request->first_node);
+#else
+ b2r2_node_free(cont, request->first_node);
+#endif
+ }
+
+ /* Release memory for the request */
+ if (request->clut != NULL) {
+ dma_free_coherent(cont->dev, CLUT_SIZE, request->clut,
+ request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ }
+ kfree(request);
+}
+
+/**
+ * Tells the job to try to allocate the resources needed to execute the job.
+ * Called just before execution of a job.
+ *
+ * @job: The job
+ * @atomic: true if called from atomic (i.e. interrupt) context. If function
+ * can't allocate in atomic context it should return error, it
+ * will then be called later from non-atomic context.
+ */
+static int job_acquire_resources(struct b2r2_core_job *job, bool atomic)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_core *core = (struct b2r2_core *) job->data;
+ struct b2r2_control *cont = core->control;
+ int ret;
+ int i;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ if (request->buf_count == 0)
+ return 0;
+
+ if (request->buf_count > MAX_TMP_BUFS_NEEDED) {
+ b2r2_log_err(cont->dev,
+ "%s: request->buf_count > MAX_TMP_BUFS_NEEDED\n",
+ __func__);
+ return -ENOMSG;
+ }
+
+ /*
+ * 1 to 1 mapping between request temp buffers and temp buffers
+ * (request temp buf 0 is always temp buf 0, request temp buf 1 is
+ * always temp buf 1 and so on) to avoid starvation of jobs that
+ * require multiple temp buffers. Not optimal in terms of memory
+ * usage, but we avoid getting into a situation where lower prio jobs can
+ * delay higher prio jobs that require more temp buffers.
+ */
+ if (cont->tmp_bufs[0].in_use)
+ return -EAGAIN;
+
+ for (i = 0; i < request->buf_count; i++) {
+ if (cont->tmp_bufs[i].buf.size < request->bufs[i].size) {
+ b2r2_log_err(cont->dev, "%s: "
+ "cont->tmp_bufs[i].buf.size < "
+ "request->bufs[i].size\n", __func__);
+ ret = -ENOMSG;
+ goto error;
+ }
+
+ cont->tmp_bufs[i].in_use = true;
+ request->bufs[i].phys_addr = cont->tmp_bufs[i].buf.phys_addr;
+ request->bufs[i].virt_addr = cont->tmp_bufs[i].buf.virt_addr;
+
+ b2r2_log_info(cont->dev, "%s: phys=%p, virt=%p\n",
+ __func__, (void *)request->bufs[i].phys_addr,
+ request->bufs[i].virt_addr);
+
+ ret = b2r2_node_split_assign_buffers(cont,
+ &request->node_split_job,
+ request->first_node, request->bufs,
+ request->buf_count);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < request->buf_count; i++)
+ cont->tmp_bufs[i].in_use = false;
+
+ return ret;
+}
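+
+/*
+ * Note on the fixed 1:1 mapping above (illustrative numbers): a job
+ * needing two temp buffers always claims tmp_bufs[0] and tmp_bufs[1].
+ * Since every job starts by testing tmp_bufs[0], a one-buffer job can
+ * never hold tmp_bufs[1] alone, so a queued two-buffer job is delayed
+ * by at most one running job rather than being starved.
+ */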
+
+/**
+ * Tells the job to free the resources needed to execute the job.
+ * Called after execution of a job.
+ *
+ * @job: The job
+ * @atomic: true if called from atomic (i.e. interrupt) context. If function
+ * can't allocate in atomic context it should return error, it
+ * will then be called later from non-atomic context.
+ */
+static void job_release_resources(struct b2r2_core_job *job, bool atomic)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_core *core = (struct b2r2_core *) job->data;
+ struct b2r2_control *cont = core->control;
+ int i;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Free any temporary buffers */
+ for (i = 0; i < request->buf_count; i++) {
+
+ b2r2_log_info(cont->dev, "%s: freeing %d bytes\n",
+ __func__, request->bufs[i].size);
+ cont->tmp_bufs[i].in_use = false;
+ memset(&request->bufs[i], 0, sizeof(request->bufs[i]));
+ }
+ request->buf_count = 0;
+
+ /*
+ * Early release of nodes
+ * FIXME: If nodes are to be reused we don't want to release here
+ */
+ if (!atomic && request->first_node) {
+ b2r2_debug_job_done(cont, request->first_node);
+
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(cont, request->first_node);
+#else
+ b2r2_node_free(cont, request->first_node);
+#endif
+ request->first_node = NULL;
+ }
+}
+
+#endif /* !CONFIG_B2R2_GENERIC_ONLY */
+
+#ifdef CONFIG_B2R2_GENERIC
+/**
+ * Called when job for one tile is done or cancelled
+ * in the generic path.
+ *
+ * @job: The job
+ */
+static void tile_job_callback_gen(struct b2r2_core_job *job)
+{
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_core *core =
+ (struct b2r2_core *) job->data;
+ struct b2r2_control *cont = core->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Notify if a tile job is cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED)
+ b2r2_log_info(cont->dev, "%s: Tile job cancelled:\n",
+ __func__);
+#endif
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when job is done or cancelled.
+ * Used for the last tile in the generic path
+ * to notify waiting clients.
+ *
+ * @job: The job
+ */
+static void job_callback_gen(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_core *core = (struct b2r2_core *) job->data;
+ struct b2r2_control *cont = core->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+ /* Unresolve the buffers */
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+
+ /* Move to report list if the job shall be reported */
+ /* FIXME: Use a smaller struct? */
+ /* TODO: In the case of kernel API call, feed an asynch task to the
+ * instance worker (kthread) instead of polling for a report */
+ mutex_lock(&request->instance->lock);
+ if (request->user_req.flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) {
+ /* Move job to report list */
+ list_add_tail(&request->list,
+ &request->instance->report_list);
+ inc_stat(cont, &cont->stat_n_jobs_in_report_list);
+
+ /* Wake up poll */
+ wake_up_interruptible(
+ &request->instance->report_list_waitq);
+
+ /*
+ * Add a reference because we put the
+ * job in the report list
+ */
+ b2r2_core_job_addref(job, __func__);
+ }
+
+ /*
+ * Decrease number of active requests and wake up
+ * synching threads if active requests reaches zero
+ */
+ BUG_ON(request->instance->no_of_active_requests == 0);
+ request->instance->no_of_active_requests--;
+ if (request->instance->synching &&
+ request->instance->no_of_active_requests == 0) {
+ request->instance->synching = false;
+ /* Wake up all syncing */
+
+ wake_up_interruptible_all(
+ &request->instance->synch_done_waitq);
+ }
+ mutex_unlock(&request->instance->lock);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Dump job if cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED) {
+ char *buf = kmalloc(4096, GFP_KERNEL);
+
+ b2r2_log_info(cont->dev, "%s: Job cancelled:\n", __func__);
+ if (buf != NULL) {
+ sprintf_req(request, buf, 4096);
+ b2r2_log_info(cont->dev, "%s", buf);
+ kfree(buf);
+ } else {
+ b2r2_log_info(cont->dev, "Unable to print the request."
+ " Message buffer allocation failed.\n");
+ }
+ }
+#endif
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when tile job should be released (free memory etc.)
+ * Should be used only for tile jobs. Tile jobs should only be used
+ * by b2r2_core, thus making ref_count trigger their release.
+ *
+ * @job: The job
+ */
+static void tile_job_release_gen(struct b2r2_core_job *job)
+{
+ struct b2r2_core *core =
+ (struct b2r2_core *) job->data;
+ struct b2r2_control *cont = core->control;
+
+ inc_stat(cont, &cont->stat_n_jobs_released);
+
+ b2r2_log_info(cont->dev, "%s, first_node_address=0x%.8x, ref_count="
+ "%d\n", __func__, job->first_node_address,
+ job->ref_count);
+
+ /* Release memory for the job */
+ kfree(job);
+}
+
+/**
+ * Called when job should be released (free memory etc.)
+ *
+ * @job: The job
+ */
+static void job_release_gen(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_core *core = (struct b2r2_core *) job->data;
+ struct b2r2_control *cont = core->control;
+
+ inc_stat(cont, &cont->stat_n_jobs_released);
+
+ b2r2_log_info(cont->dev, "%s, first_node=%p, ref_count=%d\n",
+ __func__, request->first_node, request->job.ref_count);
+
+ if (request->first_node) {
+ b2r2_debug_job_done(cont, request->first_node);
+
+ /* Free nodes */
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(cont, request->first_node);
+#else
+ b2r2_node_free(cont, request->first_node);
+#endif
+ }
+
+ /* Release memory for the request */
+ if (request->clut != NULL) {
+ dma_free_coherent(cont->dev, CLUT_SIZE, request->clut,
+ request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ }
+ kfree(request);
+}
+
+static int job_acquire_resources_gen(struct b2r2_core_job *job, bool atomic)
+{
+ /* Nothing so far. Temporary buffers are pre-allocated */
+ return 0;
+}
+static void job_release_resources_gen(struct b2r2_core_job *job, bool atomic)
+{
+ /* Nothing so far. Temporary buffers are pre-allocated */
+}
+
+/**
+ * b2r2_generic_blt - Generic implementation of the B2R2 blit request
+ *
+ * @request: The request to perform
+ */
+int b2r2_generic_blt(struct b2r2_blt_request *request)
+{
+ int ret = 0;
+ struct b2r2_blt_rect actual_dst_rect;
+ int request_id = 0;
+ struct b2r2_node *last_node = request->first_node;
+ int node_count;
+ s32 tmp_buf_width = 0;
+ s32 tmp_buf_height = 0;
+ u32 tmp_buf_count = 0;
+ s32 x;
+ s32 y;
+ const struct b2r2_blt_rect *dst_rect = &(request->user_req.dst_rect);
+ const s32 dst_img_width = request->user_req.dst_img.width;
+ const s32 dst_img_height = request->user_req.dst_img.height;
+ const enum b2r2_blt_flag flags = request->user_req.flags;
+ /* Descriptors for the temporary buffers */
+ struct b2r2_work_buf work_bufs[4];
+ struct b2r2_blt_rect dst_rect_tile;
+ int i;
+ struct b2r2_control_instance *instance = request->instance;
+ struct b2r2_control *cont = instance->control;
+
+ u32 thread_runtime_at_start = 0;
+ s32 nsec_active_in_b2r2 = 0;
+
+ /*
+ * Early exit for a zero blt: dst_rect lies completely outside
+ * of dst_img, or dst_clip_rect lies completely outside of
+ * dst_img.
+ */
+ if (dst_rect->x + dst_rect->width <= 0 ||
+ dst_rect->y + dst_rect->height <= 0 ||
+ dst_img_width <= dst_rect->x ||
+ dst_img_height <= dst_rect->y ||
+ ((flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0 &&
+ (dst_img_width <= request->user_req.dst_clip_rect.x ||
+ dst_img_height <= request->user_req.dst_clip_rect.y ||
+ request->user_req.dst_clip_rect.x +
+ request->user_req.dst_clip_rect.width <= 0 ||
+ request->user_req.dst_clip_rect.y +
+ request->user_req.dst_clip_rect.height <= 0))) {
+ goto zero_blt;
+ }
+
+ if (request->profile) {
+ request->start_time_nsec = b2r2_get_curr_nsec();
+ thread_runtime_at_start = (u32)task_sched_runtime(current);
+ }
+
+ memset(work_bufs, 0, sizeof(work_bufs));
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt);
+
+ /* Debug prints of incoming request */
+ b2r2_log_info(cont->dev,
+ "src.fmt=%#010x flags=0x%.8x src.buf={%d,%d,0x%.8x}\n"
+ "src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
+ request->user_req.src_img.fmt,
+ request->user_req.flags,
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+ b2r2_log_info(cont->dev,
+ "dst.fmt=%#010x dst.buf={%d,%d,0x%.8x}\n"
+ "dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n"
+ "dst_clip_rect={%d,%d,%d,%d}\n",
+ request->user_req.dst_img.fmt,
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height,
+ request->user_req.dst_clip_rect.x,
+ request->user_req.dst_clip_rect.y,
+ request->user_req.dst_clip_rect.width,
+ request->user_req.dst_clip_rect.height);
+
+ inc_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Wait here if synch is ongoing */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ if (ret) {
+ b2r2_log_warn(cont->dev, "%s: Sync wait interrupted, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+ goto synch_interrupted;
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Resolve the buffers */
+
+ /* Source buffer */
+ ret = resolve_buf(cont, &request->user_req.src_img,
+ &request->user_req.src_rect, false, &request->src_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve src buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_buf_failed;
+ }
+
+ /* Source mask buffer */
+ ret = resolve_buf(cont, &request->user_req.src_mask,
+ &request->user_req.src_rect, false,
+ &request->src_mask_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Resolve src mask buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_mask_buf_failed;
+ }
+
+ /* Destination buffer */
+ get_actual_dst_rect(&request->user_req, &actual_dst_rect);
+ ret = resolve_buf(cont, &request->user_req.dst_img, &actual_dst_rect,
+ true, &request->dst_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve dst buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_dst_buf_failed;
+ }
+
+ /* Debug prints of resolved buffers */
+ b2r2_log_info(cont->dev, "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->src_resolved.physical_address,
+ request->src_resolved.virtual_address,
+ request->src_resolved.is_pmem,
+ request->src_resolved.filep,
+ request->src_resolved.file_physical_start,
+ request->src_resolved.file_virtual_start,
+ request->src_resolved.file_len);
+
+ b2r2_log_info(cont->dev, "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->dst_resolved.physical_address,
+ request->dst_resolved.virtual_address,
+ request->dst_resolved.is_pmem,
+ request->dst_resolved.filep,
+ request->dst_resolved.file_physical_start,
+ request->dst_resolved.file_virtual_start,
+ request->dst_resolved.file_len);
+
+ /* Calculate the number of nodes (and resources) needed for this job */
+ ret = b2r2_generic_analyze(request, &tmp_buf_width,
+ &tmp_buf_height, &tmp_buf_count, &node_count);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to analyze request, ret = %d\n",
+ __func__, ret);
+#ifdef CONFIG_DEBUG_FS
+ {
+ /* Failed, dump job to dmesg */
+ char *buf = kmalloc(4096, GFP_KERNEL);
+
+ b2r2_log_info(cont->dev,
+ "%s: Analyze failed for:\n", __func__);
+ if (buf != NULL) {
+ sprintf_req(request, buf, 4096);
+ b2r2_log_info(cont->dev, "%s", buf);
+ kfree(buf);
+ } else {
+ b2r2_log_info(cont->dev,
+ "Unable to print the request. "
+ "Message buffer allocation failed.\n");
+ }
+ }
+#endif
+ goto generate_nodes_failed;
+ }
+
+ /* Allocate the nodes needed */
+#ifdef B2R2_USE_NODE_GEN
+ request->first_node = b2r2_blt_alloc_nodes(cont, node_count);
+ if (request->first_node == NULL) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#else
+ ret = b2r2_node_alloc(cont, node_count, &(request->first_node));
+ if (ret < 0 || request->first_node == NULL) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#endif
+
+ /* Allocate the temporary buffers */
+ for (i = 0; i < tmp_buf_count; i++) {
+ void *virt;
+ work_bufs[i].size = tmp_buf_width * tmp_buf_height * 4;
+
+ virt = dma_alloc_coherent(cont->dev,
+ work_bufs[i].size,
+ &(work_bufs[i].phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (virt == NULL) {
+ ret = -ENOMEM;
+ goto alloc_work_bufs_failed;
+ }
+
+ work_bufs[i].virt_addr = virt;
+ memset(work_bufs[i].virt_addr, 0xff, work_bufs[i].size);
+ }
+ ret = b2r2_generic_configure(request,
+ request->first_node, &work_bufs[0], tmp_buf_count);
+
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to perform generic configure, ret = %d\n",
+ __func__, ret);
+ goto generic_conf_failed;
+ }
+
+ /* Exit here if dry run */
+ if (flags & B2R2_BLT_FLAG_DRY_RUN)
+ goto exit_dry_run;
+
+ /*
+ * Configure the request and make sure
+ * that its job is run only for the LAST tile.
+ * This is when the request is complete
+ * and waiting clients should be notified.
+ */
+ last_node = request->first_node;
+ while (last_node && last_node->next)
+ last_node = last_node->next;
+
+ request->job.tag = (int) instance;
+ request->job.data = (int) cont->data;
+ request->job.prio = request->user_req.prio;
+ request->job.first_node_address =
+ request->first_node->physical_address;
+ request->job.last_node_address =
+ last_node->physical_address;
+ request->job.callback = job_callback_gen;
+ request->job.release = job_release_gen;
+ /* Work buffers and nodes are pre-allocated */
+ request->job.acquire_resources = job_acquire_resources_gen;
+ request->job.release_resources = job_release_resources_gen;
+
+ /* Flush the L1/L2 cache for the buffers */
+
+ /* Source buffer */
+ if (!(flags & B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH) &&
+ (request->user_req.src_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_img,
+ &request->src_resolved,
+ false, /*is_dst*/
+ &request->user_req.src_rect);
+
+ /* Source mask buffer */
+ if (!(flags & B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH) &&
+ (request->user_req.src_mask.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_mask.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_mask,
+ &request->src_mask_resolved,
+ false, /*is_dst*/
+ NULL);
+
+ /* Destination buffer */
+ if (!(flags & B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH) &&
+ (request->user_req.dst_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.dst_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.dst_img,
+ &request->dst_resolved,
+ true, /*is_dst*/
+ &request->user_req.dst_rect);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Remember latest request */
+ cont->debugfs_latest_request = *request;
+#endif
+
+ /*
+ * Same nodes are reused for all the jobs needed to complete the blit.
+ * Nodes are NOT released together with associated job,
+ * as is the case with optimized b2r2_blt() path.
+ */
+ mutex_lock(&instance->lock);
+ instance->no_of_active_requests++;
+ mutex_unlock(&instance->lock);
+ /*
+ * Process all but the last row in the destination rectangle.
+ * Consider only the tiles that will actually end up inside
+ * the destination image.
+ * dst_rect->height - tmp_buf_height being <= 0 is all right.
+ * The loop will not be entered since y will always be equal to or
+ * greater than zero.
+ * Early exit check at the beginning handles the cases when nothing
+ * at all should be processed.
+ */
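+ /*
+ * Worked example (hypothetical numbers): with dst_rect
+ * {x=0, y=0, width=100, height=100} fully inside dst_img and a
+ * 32x32 temp buffer, this loop processes the row bands starting
+ * at y = 0, 32 and 64; the final band at y = 96 (height 4) is
+ * handled by the last-row loop further down.
+ */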
+ y = 0;
+ if (dst_rect->y < 0)
+ y = -dst_rect->y;
+
+ for (; y < dst_rect->height - tmp_buf_height &&
+ y + dst_rect->y < dst_img_height - tmp_buf_height;
+ y += tmp_buf_height) {
+ /* Tile in the destination rectangle being processed */
+ struct b2r2_blt_rect dst_rect_tile;
+ dst_rect_tile.y = y;
+ dst_rect_tile.width = tmp_buf_width;
+ dst_rect_tile.height = tmp_buf_height;
+
+ x = 0;
+ if (dst_rect->x < 0)
+ x = -dst_rect->x;
+
+ for (; x < dst_rect->width && x + dst_rect->x < dst_img_width;
+ x += tmp_buf_width) {
+ /*
+ * Tile jobs are freed by the supplied release function
+ * when ref_count on a tile_job reaches zero.
+ */
+ struct b2r2_core_job *tile_job =
+ kmalloc(sizeof(*tile_job), GFP_KERNEL);
+ if (tile_job == NULL) {
+ /*
+ * Skip this tile. Do not abort,
+ * just hope for better luck
+ * with rest of the tiles.
+ * Memory might become available.
+ */
+ b2r2_log_info(cont->dev, "%s: Failed to alloc "
+ "job. Skipping tile at (x, y)="
+ "(%d, %d)\n", __func__, x, y);
+ continue;
+ }
+ tile_job->job_id = request->job.job_id;
+ tile_job->tag = request->job.tag;
+ tile_job->data = request->job.data;
+ tile_job->prio = request->job.prio;
+ tile_job->first_node_address =
+ request->job.first_node_address;
+ tile_job->last_node_address =
+ request->job.last_node_address;
+ tile_job->callback = tile_job_callback_gen;
+ tile_job->release = tile_job_release_gen;
+ /* Work buffers and nodes are pre-allocated */
+ tile_job->acquire_resources =
+ job_acquire_resources_gen;
+ tile_job->release_resources =
+ job_release_resources_gen;
+
+ dst_rect_tile.x = x;
+ if (x + dst_rect->x + tmp_buf_width > dst_img_width) {
+ /*
+ * Only a part of the tile can be written.
+ * Limit imposed by buffer size.
+ */
+ dst_rect_tile.width =
+ dst_img_width - (x + dst_rect->x);
+ } else if (x + tmp_buf_width > dst_rect->width) {
+ /*
+ * Only a part of the tile can be written.
+ * In this case limit imposed by dst_rect size.
+ */
+ dst_rect_tile.width = dst_rect->width - x;
+ } else {
+ /* Whole tile can be written. */
+ dst_rect_tile.width = tmp_buf_width;
+ }
+ /*
+ * Where applicable, calculate area in src buffer
+ * that is needed to generate the specified part
+ * of destination rectangle.
+ */
+ b2r2_generic_set_areas(request,
+ request->first_node, &dst_rect_tile);
+ /* Submit the job */
+ b2r2_log_info(cont->dev,
+ "%s: Submitting job\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_add);
+
+ mutex_lock(&instance->lock);
+
+ request_id = b2r2_core_job_add(cont, tile_job);
+
+ dec_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Failed to add tile job, ret = %d\n",
+ __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+ goto job_add_failed;
+ }
+
+ inc_stat(cont, &cont->stat_n_jobs_added);
+
+ mutex_unlock(&instance->lock);
+
+ /* Wait for the job to be done */
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
+
+ ret = b2r2_core_job_wait(tile_job);
+
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to wait job, ret = %d\n",
+ __func__, ret);
+ else {
+ b2r2_log_info(cont->dev,
+ "%s: Synchronous wait done\n",
+ __func__);
+
+ nsec_active_in_b2r2 +=
+ tile_job->nsec_active_in_hw;
+ }
+ /* Release matching the addref in b2r2_core_job_add */
+ b2r2_core_job_release(tile_job, __func__);
+ }
+ }
+
+ x = 0;
+ if (dst_rect->x < 0)
+ x = -dst_rect->x;
+
+ for (; x < dst_rect->width &&
+ x + dst_rect->x < dst_img_width; x += tmp_buf_width) {
+ struct b2r2_core_job *tile_job = NULL;
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ /*
+ * Tile jobs are freed by the supplied release function
+ * when ref_count on a tile_job reaches zero.
+ * Do NOT allocate a tile_job for the last tile.
+ * Send the job from the request. This way clients
+ * will be notified when the whole blit is complete
+ * and not just part of it.
+ */
+ tile_job = kmalloc(sizeof(*tile_job), GFP_KERNEL);
+ if (tile_job == NULL) {
+ b2r2_log_info(cont->dev, "%s: Failed to alloc "
+ "job. Skipping tile at (x, y)="
+ "(%d, %d)\n", __func__, x, y);
+ continue;
+ }
+ tile_job->job_id = request->job.job_id;
+ tile_job->tag = request->job.tag;
+ tile_job->data = request->job.data;
+ tile_job->prio = request->job.prio;
+ tile_job->first_node_address =
+ request->job.first_node_address;
+ tile_job->last_node_address =
+ request->job.last_node_address;
+ tile_job->callback = tile_job_callback_gen;
+ tile_job->release = tile_job_release_gen;
+ tile_job->acquire_resources =
+ job_acquire_resources_gen;
+ tile_job->release_resources =
+ job_release_resources_gen;
+ }
+
+ dst_rect_tile.x = x;
+ if (x + dst_rect->x + tmp_buf_width > dst_img_width) {
+ /*
+ * Only a part of the tile can be written.
+ * Limit imposed by buffer size.
+ */
+ dst_rect_tile.width = dst_img_width - (x + dst_rect->x);
+ } else if (x + tmp_buf_width > dst_rect->width) {
+ /*
+ * Only a part of the tile can be written.
+ * In this case limit imposed by dst_rect size.
+ */
+ dst_rect_tile.width = dst_rect->width - x;
+ } else {
+ /* Whole tile can be written. */
+ dst_rect_tile.width = tmp_buf_width;
+ }
+ /*
+ * y is now the last row. Either because the whole dst_rect
+ * has been processed, or because the last row that will be
+ * written to dst_img has been reached. Limits imposed in
+ * the same way as for width.
+ */
+ dst_rect_tile.y = y;
+ if (y + dst_rect->y + tmp_buf_height > dst_img_height)
+ dst_rect_tile.height =
+ dst_img_height - (y + dst_rect->y);
+ else if (y + tmp_buf_height > dst_rect->height)
+ dst_rect_tile.height = dst_rect->height - y;
+ else
+ dst_rect_tile.height = tmp_buf_height;
+
+ b2r2_generic_set_areas(request,
+ request->first_node, &dst_rect_tile);
+
+ b2r2_log_info(cont->dev, "%s: Submitting job\n", __func__);
+ inc_stat(cont, &cont->stat_n_in_blt_add);
+
+ mutex_lock(&instance->lock);
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ request_id = b2r2_core_job_add(cont, tile_job);
+ } else {
+ /*
+ * Last tile. Send the job-struct from the request.
+ * Clients will be notified once it completes.
+ */
+ request_id = b2r2_core_job_add(cont, &request->job);
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn(cont->dev, "%s: Failed to add tile job, "
+ "ret = %d\n", __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+ if (tile_job != NULL)
+ kfree(tile_job);
+ goto job_add_failed;
+ }
+
+ inc_stat(cont, &cont->stat_n_jobs_added);
+ mutex_unlock(&instance->lock);
+
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ ret = b2r2_core_job_wait(tile_job);
+ } else {
+ /*
+ * This is the last tile. Wait for the job-struct from
+ * the request.
+ */
+ ret = b2r2_core_job_wait(&request->job);
+ }
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to wait job, ret = %d\n",
+ __func__, ret);
+ else {
+ b2r2_log_info(cont->dev,
+ "%s: Synchronous wait done\n", __func__);
+
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width)
+ nsec_active_in_b2r2 +=
+ tile_job->nsec_active_in_hw;
+ else
+ nsec_active_in_b2r2 +=
+ request->job.nsec_active_in_hw;
+ }
+
+ /*
+ * Release matching the addref in b2r2_core_job_add.
+ * Make sure that the correct job-struct is released
+ * when the last tile is processed.
+ */
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ b2r2_core_job_release(tile_job, __func__);
+ } else {
+ /*
+ * Update profiling information before
+ * the request is released together with
+ * its core_job.
+ */
+ if (request->profile) {
+ request->nsec_active_in_cpu =
+ (s32)((u32)task_sched_runtime(current) -
+ thread_runtime_at_start);
+ request->total_time_nsec =
+ (s32)(b2r2_get_curr_nsec() -
+ request->start_time_nsec);
+ request->job.nsec_active_in_hw =
+ nsec_active_in_b2r2;
+
+ b2r2_call_profiler_blt_done(request);
+ }
+
+ b2r2_core_job_release(&request->job, __func__);
+ }
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt);
+
+ for (i = 0; i < tmp_buf_count; i++) {
+ dma_free_coherent(cont->dev,
+ work_bufs[i].size,
+ work_bufs[i].virt_addr,
+ work_bufs[i].phys_addr);
+ memset(&(work_bufs[i]), 0, sizeof(work_bufs[i]));
+ }
+
+ return request_id;
+
+job_add_failed:
+exit_dry_run:
+generic_conf_failed:
+alloc_work_bufs_failed:
+ for (i = 0; i < 4; i++) {
+ if (work_bufs[i].virt_addr != NULL) {
+ dma_free_coherent(cont->dev,
+ work_bufs[i].size,
+ work_bufs[i].virt_addr,
+ work_bufs[i].phys_addr);
+ memset(&(work_bufs[i]), 0, sizeof(work_bufs[i]));
+ }
+ }
+
+generate_nodes_failed:
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+resolve_dst_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+resolve_src_mask_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+resolve_src_buf_failed:
+synch_interrupted:
+zero_blt:
+ job_release_gen(&request->job);
+ dec_stat(cont, &cont->stat_n_jobs_released);
+ dec_stat(cont, &cont->stat_n_in_blt);
+
+ b2r2_log_info(cont->dev, "b2r2:%s ret=%d\n", __func__, ret);
+ return ret;
+}
+#endif /* CONFIG_B2R2_GENERIC */
+
+/**
+ * b2r2_blt_synch - Implements wait for all or a specified job
+ *
+ * @instance: The B2R2 BLT instance
+ * @request_id: If 0, wait for all requests on this instance to finish.
+ * Else wait for request with given request id to finish.
+ */
+int b2r2_control_synch(struct b2r2_control_instance *instance,
+ int request_id)
+{
+ int ret = 0;
+ struct b2r2_control *cont = instance->control;
+
+ b2r2_log_info(cont->dev, "%s, request_id=%d\n", __func__, request_id);
+
+ if (request_id == 0) {
+ /* Wait for all requests */
+ inc_stat(cont, &cont->stat_n_in_synch_0);
+
+ /* Enter state "synching" if we have any active request */
+ mutex_lock(&instance->lock);
+ if (instance->no_of_active_requests)
+ instance->synching = true;
+ mutex_unlock(&instance->lock);
+
+ /* Wait until no longer in state synching */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ dec_stat(cont, &cont->stat_n_in_synch_0);
+ } else {
+ struct b2r2_core_job *job;
+
+ inc_stat(cont, &cont->stat_n_in_synch_job);
+
+ /* Wait for specific job */
+ job = b2r2_core_job_find(cont, request_id);
+ if (job) {
+ /* Wait on find job */
+ ret = b2r2_core_job_wait(job);
+ /* Release matching the addref in b2r2_core_job_find */
+ b2r2_core_job_release(job, __func__);
+ }
+
+ /* If the job is not found we assume that it has already run */
+ dec_stat(cont, &cont->stat_n_in_synch_job);
+ }
+
+ b2r2_log_info(cont->dev,
+ "%s, request_id=%d, returns %d\n", __func__, request_id, ret);
+
+ return ret;
+}
+
+static void get_actual_dst_rect(struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *actual_dst_rect)
+{
+ struct b2r2_blt_rect dst_img_bounds;
+
+ b2r2_get_img_bounding_rect(&req->dst_img, &dst_img_bounds);
+
+ b2r2_intersect_rects(&req->dst_rect, &dst_img_bounds, actual_dst_rect);
+
+ if (req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP)
+ b2r2_intersect_rects(actual_dst_rect, &req->dst_clip_rect,
+ actual_dst_rect);
+}
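+
+/*
+ * Worked example (hypothetical values): for dst_rect
+ * {x=-10, y=20, width=50, height=50} on a 40x40 dst_img, the
+ * intersection with the image bounding rect yields
+ * {x=0, y=20, width=40, height=20}. With
+ * B2R2_BLT_FLAG_DESTINATION_CLIP set, that result is intersected
+ * once more with dst_clip_rect.
+ */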
+
+static void set_up_hwmem_region(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect,
+ struct hwmem_region *region)
+{
+ s32 img_size;
+
+ memset(region, 0, sizeof(*region));
+
+ if (b2r2_is_zero_area_rect(rect))
+ return;
+
+ img_size = b2r2_get_img_size(cont->dev, img);
+
+ if (b2r2_is_single_plane_fmt(img->fmt) &&
+ b2r2_is_independent_pixel_fmt(img->fmt)) {
+ int img_fmt_bpp = b2r2_get_fmt_bpp(cont->dev, img->fmt);
+ u32 img_pitch = b2r2_get_img_pitch(cont->dev, img);
+
+ region->offset = (u32)(img->buf.offset + (rect->y *
+ img_pitch));
+ region->count = (u32)rect->height;
+ region->start = (u32)((rect->x * img_fmt_bpp) / 8);
+ region->end = (u32)b2r2_div_round_up(
+ (rect->x + rect->width) * img_fmt_bpp, 8);
+ region->size = img_pitch;
+ } else {
+ /*
+ * TODO: Locking entire buffer as a quick safe solution. In the
+ * future we should lock less to avoid unnecessary cache
+ * synching. Pixel interleaved YCbCr formats should be quite
+ * easy, just align start and stop points on 2.
+ */
+ region->offset = (u32)img->buf.offset;
+ region->count = 1;
+ region->start = 0;
+ region->end = (u32)img_size;
+ region->size = (u32)img_size;
+ }
+}
+
+static int resolve_hwmem(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst,
+ struct b2r2_resolved_buf *resolved_buf)
+{
+ int return_value = 0;
+ enum hwmem_mem_type mem_type;
+ enum hwmem_access access;
+ enum hwmem_access required_access;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+ struct hwmem_region region;
+
+ resolved_buf->hwmem_alloc =
+ hwmem_resolve_by_name(img->buf.hwmem_buf_name);
+ if (IS_ERR(resolved_buf->hwmem_alloc)) {
+ return_value = PTR_ERR(resolved_buf->hwmem_alloc);
+ b2r2_log_info(cont->dev, "%s: hwmem_resolve_by_name failed, "
+ "error code: %i\n", __func__, return_value);
+ goto resolve_failed;
+ }
+
+ hwmem_get_info(resolved_buf->hwmem_alloc, &resolved_buf->file_len,
+ &mem_type, &access);
+
+ required_access = (is_dst ? HWMEM_ACCESS_WRITE : HWMEM_ACCESS_READ) |
+ HWMEM_ACCESS_IMPORT;
+ if ((required_access & access) != required_access) {
+ b2r2_log_info(cont->dev,
+ "%s: Insufficient access to hwmem buffer "
+ "(%d, requires %d).\n", __func__, access, required_access);
+ return_value = -EACCES;
+ goto access_check_failed;
+ }
+
+ if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS) {
+ b2r2_log_info(cont->dev, "%s: Hwmem buffer is scattered.\n",
+ __func__);
+ return_value = -EINVAL;
+ goto buf_scattered;
+ }
+
+ if (resolved_buf->file_len <
+ img->buf.offset +
+ (__u32)b2r2_get_img_size(cont->dev, img)) {
+ b2r2_log_info(cont->dev, "%s: Hwmem buffer too small. (%d < "
+ "%d)\n", __func__, resolved_buf->file_len,
+ img->buf.offset +
+ (__u32)b2r2_get_img_size(cont->dev, img));
+ return_value = -EINVAL;
+ goto size_check_failed;
+ }
+
+ return_value = hwmem_pin(resolved_buf->hwmem_alloc, &mem_chunk,
+ &mem_chunk_length);
+ if (return_value < 0) {
+ b2r2_log_info(cont->dev, "%s: hwmem_pin failed, "
+ "error code: %i\n", __func__, return_value);
+ goto pin_failed;
+ }
+ resolved_buf->file_physical_start = mem_chunk.paddr;
+
+ set_up_hwmem_region(cont, img, rect_2b_used, &region);
+ return_value = hwmem_set_domain(resolved_buf->hwmem_alloc,
+ required_access, HWMEM_DOMAIN_SYNC, &region);
+ if (return_value < 0) {
+ b2r2_log_info(cont->dev, "%s: hwmem_set_domain failed, "
+ "error code: %i\n", __func__, return_value);
+ goto set_domain_failed;
+ }
+
+ resolved_buf->physical_address =
+ resolved_buf->file_physical_start + img->buf.offset;
+
+ goto out;
+
+set_domain_failed:
+ hwmem_unpin(resolved_buf->hwmem_alloc);
+pin_failed:
+size_check_failed:
+buf_scattered:
+access_check_failed:
+ hwmem_release(resolved_buf->hwmem_alloc);
+resolve_failed:
+
+out:
+ return return_value;
+}
+
+static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf)
+{
+ hwmem_unpin(resolved_buf->hwmem_alloc);
+ hwmem_release(resolved_buf->hwmem_alloc);
+}
+
+/**
+ * unresolve_buf() - Must be called after resolve_buf
+ *
+ * @buf: The buffer specification as supplied from user space
+ * @resolved: Gathered information about the buffer
+ */
+static void unresolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_buf *buf,
+ struct b2r2_resolved_buf *resolved)
+{
+#ifdef CONFIG_ANDROID_PMEM
+ if (resolved->is_pmem && resolved->filep)
+ put_pmem_file(resolved->filep);
+#endif
+ if (resolved->hwmem_alloc != NULL)
+ unresolve_hwmem(resolved);
+}
+
+/**
+ * get_fb_info() - Fill buf with framebuffer info
+ *
+ * @file: The framebuffer file
+ * @buf: Gathered information about the buffer
+ * @img_offset: Image offset into the frame buffer
+ *
+ * Returns 0 if OK else negative error code
+ */
+static int get_fb_info(struct file *file,
+ struct b2r2_resolved_buf *buf,
+ __u32 img_offset)
+{
+#ifdef CONFIG_FB
+ if (file && buf &&
+ MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+ int i;
+ /*
+ * FIXME: Is it safe to iterate over registered_fb
+ * without holding a lock?
+ */
+ for (i = 0; i < num_registered_fb; i++) {
+ struct fb_info *info = registered_fb[i];
+
+ if (info && info->dev &&
+ MINOR(info->dev->devt) ==
+ MINOR(file->f_dentry->d_inode->i_rdev)) {
+ buf->file_physical_start = info->fix.smem_start;
+ buf->file_virtual_start = (u32)info->screen_base;
+ buf->file_len = info->fix.smem_len;
+ buf->physical_address = buf->file_physical_start +
+ img_offset;
+ buf->virtual_address =
+ (void *) (buf->file_virtual_start +
+ img_offset);
+ return 0;
+ }
+ }
+ }
+#endif
+ return -EINVAL;
+}
+
+/**
+ * resolve_buf() - Returns the physical & virtual addresses of a B2R2 blt buffer
+ *
+ * @img: The image specification as supplied from user space
+ * @rect_2b_used: The part of the image b2r2 will use.
+ * @is_dst: true if the buffer is written by B2R2, false if only read.
+ * @resolved: Gathered information about the buffer
+ *
+ * Returns 0 if OK else negative error code
+ */
+static int resolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst,
+ struct b2r2_resolved_buf *resolved)
+{
+ int ret = 0;
+
+ memset(resolved, 0, sizeof(*resolved));
+
+ switch (img->buf.type) {
+ case B2R2_BLT_PTR_NONE:
+ break;
+
+ case B2R2_BLT_PTR_PHYSICAL:
+ resolved->physical_address = img->buf.offset;
+ resolved->file_len = img->buf.len;
+ break;
+
+ /* FD + OFFSET type */
+ case B2R2_BLT_PTR_FD_OFFSET: {
+ /*
+ * TODO: Do we need to check if the process is allowed to
+ * read/write (depending on if it's dst or src) to the file?
+ */
+#ifdef CONFIG_ANDROID_PMEM
+ if (!get_pmem_file(
+ img->buf.fd,
+ (unsigned long *) &resolved->file_physical_start,
+ (unsigned long *) &resolved->file_virtual_start,
+ (unsigned long *) &resolved->file_len,
+ &resolved->filep)) {
+ resolved->physical_address =
+ resolved->file_physical_start +
+ img->buf.offset;
+ resolved->virtual_address = (void *)
+ (resolved->file_virtual_start +
+ img->buf.offset);
+ resolved->is_pmem = true;
+ } else
+#endif
+ {
+ int fput_needed;
+ struct file *file;
+
+ file = fget_light(img->buf.fd, &fput_needed);
+ if (file == NULL)
+ return -EINVAL;
+
+ ret = get_fb_info(file, resolved,
+ img->buf.offset);
+ fput_light(file, fput_needed);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Check bounds */
+ if (img->buf.offset + img->buf.len >
+ resolved->file_len) {
+ ret = -ESPIPE;
+ unresolve_buf(cont, &img->buf, resolved);
+ }
+
+ break;
+ }
+
+ case B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET:
+ ret = resolve_hwmem(cont, img, rect_2b_used, is_dst, resolved);
+ break;
+
+ default:
+ b2r2_log_warn(cont->dev, "%s: Failed to resolve buf type %d\n",
+ __func__, img->buf.type);
+
+ ret = -EINVAL;
+ break;
+
+ }
+
+ return ret;
+}
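+
+/*
+ * resolve_buf() and unresolve_buf() are used in pairs. A minimal usage
+ * sketch (illustrative only; "img", "rect" and the error handling are
+ * hypothetical):
+ *
+ *	struct b2r2_resolved_buf resolved;
+ *	int ret = resolve_buf(cont, img, rect, false, &resolved);
+ *
+ *	if (ret >= 0) {
+ *		... use resolved.physical_address ...
+ *		unresolve_buf(cont, &img->buf, &resolved);
+ *	}
+ */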
+
+/**
+ * sync_buf() - Synchronizes the memory occupied by an image buffer.
+ *
+ * @cont: The B2R2 control entity
+ * @img: User image specification
+ * @resolved: Gathered info (physical address etc.) about the buffer
+ * @is_dst: true if the buffer is a destination buffer, false if it is a
+ *          source buffer
+ * @rect: rectangle in the image buffer that should be synced.
+ *        NULL if the buffer is a source mask.
+ */
+static void sync_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
+ struct b2r2_resolved_buf *resolved,
+ bool is_dst,
+ struct b2r2_blt_rect *rect)
+{
+ struct sync_args sa;
+ u32 start_phys, end_phys;
+
+ if (B2R2_BLT_PTR_NONE == img->buf.type ||
+ B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == img->buf.type)
+ return;
+
+ start_phys = resolved->physical_address;
+ end_phys = resolved->physical_address + img->buf.len;
+
+ /*
+ * TODO: Very ugly. We should find out whether the memory is coherent in
+ * some generic way but cache handling will be rewritten soon so there
+ * is no use spending time on it. In the new design this will probably
+ * not be a problem.
+ */
+ /* The frame buffer is coherent, at least for now. */
+ if (!resolved->is_pmem) {
+ /*
+ * Drain the write buffers as they are not always part of the
+ * coherent concept.
+ */
+ wmb();
+
+ return;
+ }
+
+ /*
+ * src_mask does not have rect.
+ * Also flush full buffer for planar and semiplanar YUV formats
+ */
+ if (rect == NULL ||
+ (img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR) ||
+ (img->fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE) ||
+ (img->fmt ==
+ B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE)) {
+ sa.start = (unsigned long)resolved->virtual_address;
+ sa.end = (unsigned long)resolved->virtual_address +
+ img->buf.len;
+ start_phys = resolved->physical_address;
+ end_phys = resolved->physical_address + img->buf.len;
+ } else {
+ /*
+ * buffer is not a src_mask so make use of rect when
+ * clean & flush caches
+ */
+ u32 bpp; /* Bits per pixel */
+ u32 pitch;
+
+ switch (img->fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* Fall through */
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ bpp = 16;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ bpp = 24;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ bpp = 32;
+ break;
+ default:
+ bpp = 12;
+ }
+ if (img->pitch == 0)
+ pitch = (img->width * bpp) / 8;
+ else
+ pitch = img->pitch;
+
+ /*
+ * For 422I formats 2 horizontal pixels share color data.
+ * Thus, the x position must be aligned down to closest even
+ * number and width must be aligned up.
+ */
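+ /*
+ * Worked example (hypothetical values): with rect->x == 5 and
+ * rect->width == 3, x is aligned down to 4 and width up to 4, so
+ * the synced span covers pixels 4..7 and thereby both chroma
+ * samples shared with the requested pixels.
+ */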
+ {
+ s32 x;
+ s32 width;
+
+ switch (img->fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ x = (rect->x / 2) * 2;
+ width = ((rect->width + 1) / 2) * 2;
+ break;
+ default:
+ x = rect->x;
+ width = rect->width;
+ break;
+ }
+
+ sa.start = (unsigned long)resolved->virtual_address +
+ rect->y * pitch + (x * bpp) / 8;
+ sa.end = (unsigned long)sa.start +
+ (rect->height - 1) * pitch +
+ (width * bpp) / 8;
+
+ start_phys = resolved->physical_address +
+ rect->y * pitch + (x * bpp) / 8;
+ end_phys = start_phys +
+ (rect->height - 1) * pitch +
+ (width * bpp) / 8;
+ }
+ }
+
+ /*
+ * The virtual address to a pmem buffer is retrieved from ioremap, not
+ * sure if it's ok to use such an address as a kernel virtual address.
+ * When doing it at a higher level such as dma_map_single it triggers an
+ * error but at lower levels such as dmac_clean_range it seems to work,
+ * hence the low level stuff.
+ */
+
+ if (is_dst) {
+ /*
+ * According to ARM's docs you must clean before invalidating
+ * (i.e. flush) to avoid losing data.
+ */
+
+ /* Flush L1 cache */
+#ifdef CONFIG_SMP
+ flush_l1_cache_range_all_cpus(&sa);
+#else
+ flush_l1_cache_range_curr_cpu(&sa);
+#endif
+
+ /* Flush L2 cache */
+ outer_flush_range(start_phys, end_phys);
+ } else {
+ /* Clean L1 cache */
+#ifdef CONFIG_SMP
+ clean_l1_cache_range_all_cpus(&sa);
+#else
+ clean_l1_cache_range_curr_cpu(&sa);
+#endif
+
+ /* Clean L2 cache */
+ outer_clean_range(start_phys, end_phys);
+ }
+}
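+
+/*
+ * Summary of the cache maintenance in sync_buf() above: destination
+ * buffers are flushed (cleaned and invalidated) so that stale CPU cache
+ * lines can neither mask nor overwrite B2R2's output, while source
+ * buffers only need a clean so that B2R2 reads the CPU's latest data.
+ */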
+
+/**
+ * is_report_list_empty() - Mutex protected check of report list
+ *
+ * @instance: The B2R2 BLT instance
+ */
+static bool is_report_list_empty(struct b2r2_control_instance *instance)
+{
+ bool is_empty;
+
+ mutex_lock(&instance->lock);
+ is_empty = list_empty(&instance->report_list);
+ mutex_unlock(&instance->lock);
+
+ return is_empty;
+}
+
+/**
+ * is_synching() - Mutex protected check if synching
+ *
+ * @instance: The B2R2 BLT instance
+ */
+static bool is_synching(struct b2r2_control_instance *instance)
+{
+ bool is_synching;
+
+ mutex_lock(&instance->lock);
+ is_synching = instance->synching;
+ mutex_unlock(&instance->lock);
+
+ return is_synching;
+}
+
+/**
+ * inc_stat() - Mutex protected increment of statistics variable
+ *
+ * @cont: The B2R2 control entity
+ * @stat: Pointer to statistics variable that should be incremented
+ */
+static void inc_stat(struct b2r2_control *cont, unsigned long *stat)
+{
+ mutex_lock(&cont->stat_lock);
+ (*stat)++;
+ mutex_unlock(&cont->stat_lock);
+}
+
+/**
+ * dec_stat() - Mutex protected decrement of statistics variable
+ *
+ * @cont: The B2R2 control entity
+ * @stat: Pointer to statistics variable that should be decremented
+ */
+static void dec_stat(struct b2r2_control *cont, unsigned long *stat)
+{
+ mutex_lock(&cont->stat_lock);
+ (*stat)--;
+ mutex_unlock(&cont->stat_lock);
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * debugfs_b2r2_blt_request_read() - Implements debugfs read for the
+ *                                   latest B2R2 BLT request
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_blt_request_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ char *Buf = kmalloc(4096, GFP_KERNEL);
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_size = sprintf_req(&cont->debugfs_latest_request, Buf, 4096);
+
+ /* Nothing more to read if the offset is at or beyond the end */
+ if (*f_pos >= dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ kfree(Buf); /* kfree(NULL) is a no-op */
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_blt_request_fops - File operations for B2R2 request debugfs
+ */
+static const struct file_operations debugfs_b2r2_blt_request_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_blt_request_read,
+};
+
+/**
+ * struct debugfs_reg - Represents a B2R2 node "register"
+ *
+ * @name: Register name
+ * @offset: Offset within the node
+ */
+struct debugfs_reg {
+ const char name[30];
+ u32 offset;
+};
+
+/**
+ * debugfs_node_regs - Array with all the registers in a B2R2 node, for debug
+ */
+static const struct debugfs_reg debugfs_node_regs[] = {
+ {"GROUP0.B2R2_NIP", offsetof(struct b2r2_link_list, GROUP0.B2R2_NIP)},
+ {"GROUP0.B2R2_CIC", offsetof(struct b2r2_link_list, GROUP0.B2R2_CIC)},
+ {"GROUP0.B2R2_INS", offsetof(struct b2r2_link_list, GROUP0.B2R2_INS)},
+ {"GROUP0.B2R2_ACK", offsetof(struct b2r2_link_list, GROUP0.B2R2_ACK)},
+
+ {"GROUP1.B2R2_TBA", offsetof(struct b2r2_link_list, GROUP1.B2R2_TBA)},
+ {"GROUP1.B2R2_TTY", offsetof(struct b2r2_link_list, GROUP1.B2R2_TTY)},
+ {"GROUP1.B2R2_TXY", offsetof(struct b2r2_link_list, GROUP1.B2R2_TXY)},
+ {"GROUP1.B2R2_TSZ", offsetof(struct b2r2_link_list, GROUP1.B2R2_TSZ)},
+
+ {"GROUP2.B2R2_S1CF", offsetof(struct b2r2_link_list, GROUP2.B2R2_S1CF)},
+ {"GROUP2.B2R2_S2CF", offsetof(struct b2r2_link_list, GROUP2.B2R2_S2CF)},
+
+ {"GROUP3.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP3.B2R2_SBA)},
+ {"GROUP3.B2R2_STY", offsetof(struct b2r2_link_list, GROUP3.B2R2_STY)},
+ {"GROUP3.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP3.B2R2_SXY)},
+ {"GROUP3.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP3.B2R2_SSZ)},
+
+ {"GROUP4.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP4.B2R2_SBA)},
+ {"GROUP4.B2R2_STY", offsetof(struct b2r2_link_list, GROUP4.B2R2_STY)},
+ {"GROUP4.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP4.B2R2_SXY)},
+ {"GROUP4.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP4.B2R2_SSZ)},
+
+ {"GROUP5.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP5.B2R2_SBA)},
+ {"GROUP5.B2R2_STY", offsetof(struct b2r2_link_list, GROUP5.B2R2_STY)},
+ {"GROUP5.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP5.B2R2_SXY)},
+ {"GROUP5.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP5.B2R2_SSZ)},
+
+ {"GROUP6.B2R2_CWO", offsetof(struct b2r2_link_list, GROUP6.B2R2_CWO)},
+ {"GROUP6.B2R2_CWS", offsetof(struct b2r2_link_list, GROUP6.B2R2_CWS)},
+
+ {"GROUP7.B2R2_CCO", offsetof(struct b2r2_link_list, GROUP7.B2R2_CCO)},
+ {"GROUP7.B2R2_CML", offsetof(struct b2r2_link_list, GROUP7.B2R2_CML)},
+
+ {"GROUP8.B2R2_FCTL", offsetof(struct b2r2_link_list, GROUP8.B2R2_FCTL)},
+ {"GROUP8.B2R2_PMK", offsetof(struct b2r2_link_list, GROUP8.B2R2_PMK)},
+
+ {"GROUP9.B2R2_RSF", offsetof(struct b2r2_link_list, GROUP9.B2R2_RSF)},
+ {"GROUP9.B2R2_RZI", offsetof(struct b2r2_link_list, GROUP9.B2R2_RZI)},
+ {"GROUP9.B2R2_HFP", offsetof(struct b2r2_link_list, GROUP9.B2R2_HFP)},
+ {"GROUP9.B2R2_VFP", offsetof(struct b2r2_link_list, GROUP9.B2R2_VFP)},
+
+ {"GROUP10.B2R2_RSF", offsetof(struct b2r2_link_list, GROUP10.B2R2_RSF)},
+ {"GROUP10.B2R2_RZI", offsetof(struct b2r2_link_list, GROUP10.B2R2_RZI)},
+ {"GROUP10.B2R2_HFP", offsetof(struct b2r2_link_list, GROUP10.B2R2_HFP)},
+ {"GROUP10.B2R2_VFP", offsetof(struct b2r2_link_list, GROUP10.B2R2_VFP)},
+
+ {"GROUP11.B2R2_FF0", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF0)},
+ {"GROUP11.B2R2_FF1", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF1)},
+ {"GROUP11.B2R2_FF2", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF2)},
+ {"GROUP11.B2R2_FF3", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF3)},
+
+ {"GROUP12.B2R2_KEY1", offsetof(struct b2r2_link_list,
+ GROUP12.B2R2_KEY1)},
+ {"GROUP12.B2R2_KEY2", offsetof(struct b2r2_link_list,
+ GROUP12.B2R2_KEY2)},
+
+ {"GROUP13.B2R2_XYL", offsetof(struct b2r2_link_list, GROUP13.B2R2_XYL)},
+ {"GROUP13.B2R2_XYP", offsetof(struct b2r2_link_list, GROUP13.B2R2_XYP)},
+
+ {"GROUP14.B2R2_SAR", offsetof(struct b2r2_link_list, GROUP14.B2R2_SAR)},
+ {"GROUP14.B2R2_USR", offsetof(struct b2r2_link_list, GROUP14.B2R2_USR)},
+
+ {"GROUP15.B2R2_VMX0", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX0)},
+ {"GROUP15.B2R2_VMX1", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX1)},
+ {"GROUP15.B2R2_VMX2", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX2)},
+ {"GROUP15.B2R2_VMX3", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX3)},
+
+ {"GROUP16.B2R2_VMX0", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX0)},
+ {"GROUP16.B2R2_VMX1", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX1)},
+ {"GROUP16.B2R2_VMX2", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX2)},
+ {"GROUP16.B2R2_VMX3", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX3)},
+};
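+
+/*
+ * A node dump routine can walk debugfs_node_regs to print every register
+ * of a node. Sketch only; "node_virt" is a hypothetical u8 * pointing at
+ * the node's b2r2_link_list and "buf"/"len" a destination string buffer:
+ *
+ *	for (i = 0; i < ARRAY_SIZE(debugfs_node_regs); i++)
+ *		len += sprintf(buf + len, "%s: %08X\n",
+ *			debugfs_node_regs[i].name,
+ *			*(u32 *)(node_virt + debugfs_node_regs[i].offset));
+ */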
+
+/**
+ * debugfs_b2r2_blt_stat_read() - Implements debugfs read for B2R2 BLT
+ * statistics
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_blt_stat_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ char *Buf = kmalloc(4096, GFP_KERNEL);
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mutex_lock(&cont->stat_lock);
+ dev_size += sprintf(Buf + dev_size, "Added jobs : %lu\n",
+ cont->stat_n_jobs_added);
+ dev_size += sprintf(Buf + dev_size, "Released jobs : %lu\n",
+ cont->stat_n_jobs_released);
+ dev_size += sprintf(Buf + dev_size, "Jobs in report list : %lu\n",
+ cont->stat_n_jobs_in_report_list);
+ dev_size += sprintf(Buf + dev_size, "Clients in open : %lu\n",
+ cont->stat_n_in_open);
+ dev_size += sprintf(Buf + dev_size, "Clients in release : %lu\n",
+ cont->stat_n_in_release);
+ dev_size += sprintf(Buf + dev_size, "Clients in blt : %lu\n",
+ cont->stat_n_in_blt);
+ dev_size += sprintf(Buf + dev_size, " synch : %lu\n",
+ cont->stat_n_in_blt_synch);
+ dev_size += sprintf(Buf + dev_size, " add : %lu\n",
+ cont->stat_n_in_blt_add);
+ dev_size += sprintf(Buf + dev_size, " wait : %lu\n",
+ cont->stat_n_in_blt_wait);
+ dev_size += sprintf(Buf + dev_size, "Clients in synch 0 : %lu\n",
+ cont->stat_n_in_synch_0);
+ dev_size += sprintf(Buf + dev_size, "Clients in synch job : %lu\n",
+ cont->stat_n_in_synch_job);
+ dev_size += sprintf(Buf + dev_size, "Clients in query_cap : %lu\n",
+ cont->stat_n_in_query_cap);
+ mutex_unlock(&cont->stat_lock);
+
+ /* Nothing more to read if the offset is at or beyond the end */
+ if (*f_pos >= dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ kfree(Buf); /* kfree(NULL) is a no-op */
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_blt_stat_fops - File operations for the B2R2 BLT
+ *                              statistics debugfs file
+ */
+static const struct file_operations debugfs_b2r2_blt_stat_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_blt_stat_read,
+};
+#endif
+
+static void init_tmp_bufs(struct b2r2_control *cont)
+{
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(cont->tmp_bufs); i++) {
+ cont->tmp_bufs[i].buf.virt_addr = dma_alloc_coherent(
+ cont->dev, MAX_TMP_BUF_SIZE,
+ &cont->tmp_bufs[i].buf.phys_addr, GFP_DMA);
+ if (cont->tmp_bufs[i].buf.virt_addr != NULL)
+ cont->tmp_bufs[i].buf.size = MAX_TMP_BUF_SIZE;
+ else {
+ b2r2_log_err(cont->dev, "%s: Failed to allocate temp "
+ "buffer %i\n", __func__, i);
+ cont->tmp_bufs[i].buf.size = 0;
+ }
+ }
+}
+
+static void destroy_tmp_bufs(struct b2r2_control *cont)
+{
+ int i = 0;
+
+ for (i = 0; i < MAX_TMP_BUFS_NEEDED; i++) {
+ if (cont->tmp_bufs[i].buf.size != 0) {
+ dma_free_coherent(cont->dev,
+ cont->tmp_bufs[i].buf.size,
+ cont->tmp_bufs[i].buf.virt_addr,
+ cont->tmp_bufs[i].buf.phys_addr);
+
+ cont->tmp_bufs[i].buf.size = 0;
+ }
+ }
+}
+
+/**
+ * b2r2_control_init() - Initializes a B2R2 control entity
+ *
+ * Returns 0 if OK else negative error code
+ */
+int b2r2_control_init(struct b2r2_control *cont)
+{
+ int ret;
+
+ mutex_init(&cont->stat_lock);
+
+#ifdef CONFIG_B2R2_GENERIC
+ /* Initialize generic path */
+ b2r2_generic_init(cont);
+#endif
+ /* Initialize node splitter */
+ ret = b2r2_node_split_init(cont);
+ if (ret) {
+ printk(KERN_WARNING "%s: node split init fails\n", __func__);
+ goto b2r2_node_split_init_fail;
+ }
+
+ b2r2_log_info(cont->dev, "%s: device registered\n", __func__);
+
+ cont->dev->coherent_dma_mask = 0xFFFFFFFF;
+ init_tmp_bufs(cont);
+ ret = b2r2_filters_init(cont);
+ if (ret) {
+ b2r2_log_warn(cont->dev, "%s: failed to init filters\n",
+ __func__);
+ goto b2r2_filter_init_fail;
+ }
+
+ /* Initialize memory allocator */
+ ret = b2r2_mem_init(cont, B2R2_HEAP_SIZE,
+ 4, sizeof(struct b2r2_node));
+ if (ret) {
+ printk(KERN_WARNING "%s: initializing B2R2 memhandler fails\n",
+ __func__);
+ goto b2r2_mem_init_fail;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debug fs */
+ if (!IS_ERR_OR_NULL(cont->debugfs_root_dir)) {
+ debugfs_create_file("last_request", 0666,
+ cont->debugfs_root_dir,
+ cont, &debugfs_b2r2_blt_request_fops);
+ debugfs_create_file("stats", 0666,
+ cont->debugfs_root_dir,
+ cont, &debugfs_b2r2_blt_stat_fops);
+ }
+#endif
+
+ b2r2_log_info(cont->dev, "%s: done\n", __func__);
+
+ return ret;
+
+b2r2_mem_init_fail:
+ b2r2_filters_exit(cont);
+b2r2_filter_init_fail:
+ b2r2_node_split_exit(cont);
+b2r2_node_split_init_fail:
+#ifdef CONFIG_B2R2_GENERIC
+ b2r2_generic_exit(cont);
+#endif
+ return ret;
+}
+
+/**
+ * b2r2_control_exit() - Module exit function
+ */
+void b2r2_control_exit(struct b2r2_control *cont)
+{
+ if (cont) {
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+#ifdef CONFIG_DEBUG_FS
+ if (!IS_ERR_OR_NULL(cont->debugfs_root_dir)) {
+ debugfs_remove_recursive(cont->debugfs_root_dir);
+ cont->debugfs_root_dir = NULL;
+ }
+#endif
+ b2r2_mem_exit(cont);
+ destroy_tmp_bufs(cont);
+ b2r2_node_split_exit(cont);
+#if defined(CONFIG_B2R2_GENERIC)
+ b2r2_generic_exit(cont);
+#endif
+ b2r2_filters_exit(cont);
+ }
+}
+
+MODULE_AUTHOR("Robert Fekete <robert.fekete@stericsson.com>");
+MODULE_DESCRIPTION("ST-Ericsson B2R2 Blitter module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/b2r2/b2r2_control.h b/drivers/video/b2r2/b2r2_control.h
new file mode 100644
index 00000000000..d13d2188618
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_control.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * ST-Ericsson B2R2 internal definitions
+ *
+ * Author: Jorgen Nilsson <jorgen.nilsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_CONTROL_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_CONTROL_H_
+
+#include "b2r2_internal.h"
+
+int b2r2_control_init(struct b2r2_control *cont);
+void b2r2_control_exit(struct b2r2_control *cont);
+int b2r2_control_open(struct b2r2_control_instance *instance);
+int b2r2_control_release(struct b2r2_control_instance *instance);
+
+int b2r2_control_blt(struct b2r2_blt_request *request);
+int b2r2_generic_blt(struct b2r2_blt_request *request);
+int b2r2_control_waitjob(struct b2r2_blt_request *request);
+int b2r2_control_synch(struct b2r2_control_instance *instance,
+ int request_id);
+size_t b2r2_control_read(struct b2r2_control_instance *instance,
+ struct b2r2_blt_request **request_out, bool block);
+size_t b2r2_control_read_id(struct b2r2_control_instance *instance,
+ struct b2r2_blt_request **request_out, bool block,
+ int request_id);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_core.c b/drivers/video/b2r2/b2r2_core.c
new file mode 100644
index 00000000000..02071d5f989
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_core.c
@@ -0,0 +1,2763 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 core driver
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/*
+ * TODO: Clock address from platform data
+ * Platform data should have string id instead of numbers
+ * b2r2_remove, some type of runtime problem when kernel hacking
+ * debug features on
+ *
+ * Is there already a priority list in kernel?
+ * Is it possible to handle clock using clock framework?
+ * uTimeOut, use mdelay instead?
+ * Measure performance
+ *
+ * Exchange our home-cooked ref count with kernel kref? See
+ * http://lwn.net/Articles/336224/
+ *
+ * B2R2:
+ * Source fill 2 bug
+ * Check with Symbian?
+ */
+
+/* include file */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_core.h"
+#include "b2r2_global.h"
+#include "b2r2_structures.h"
+#include "b2r2_control.h"
+#include "b2r2_profiler_api.h"
+#include "b2r2_timing.h"
+#include "b2r2_debug.h"
+
+/**
+ * B2R2 Hardware defines below
+ */
+
+/* - BLT_AQ_CTL */
+#define B2R2_AQ_Enab (0x80000000)
+#define B2R2_AQ_PRIOR_0 (0x0)
+#define B2R2_AQ_PRIOR_1 (0x1)
+#define B2R2_AQ_PRIOR_2 (0x2)
+#define B2R2_AQ_PRIOR_3 (0x3)
+#define B2R2_AQ_NODE_REPEAT_INT (0x100000)
+#define B2R2_AQ_STOP_INT (0x200000)
+#define B2R2_AQ_LNA_REACH_INT (0x400000)
+#define B2R2_AQ_COMPLETED_INT (0x800000)
+
+/* - BLT_CTL */
+#define B2R2BLT_CTLGLOBAL_soft_reset (0x80000000)
+#define B2R2BLT_CTLStep_By_Step (0x20000000)
+#define B2R2BLT_CTLBig_not_little (0x10000000)
+#define B2R2BLT_CTLMask (0xb0000000)
+#define B2R2BLT_CTLTestMask (0xb0000000)
+#define B2R2BLT_CTLInitialValue (0x0)
+#define B2R2BLT_CTLAccessType (INITIAL_TEST)
+#define B2R2BLT_CTL (0xa00)
+
+/* - BLT_ITS */
+#define B2R2BLT_ITSRLD_ERROR (0x80000000)
+#define B2R2BLT_ITSAQ4_Node_Notif (0x8000000)
+#define B2R2BLT_ITSAQ4_Node_repeat (0x4000000)
+#define B2R2BLT_ITSAQ4_Stopped (0x2000000)
+#define B2R2BLT_ITSAQ4_LNA_Reached (0x1000000)
+#define B2R2BLT_ITSAQ3_Node_Notif (0x800000)
+#define B2R2BLT_ITSAQ3_Node_repeat (0x400000)
+#define B2R2BLT_ITSAQ3_Stopped (0x200000)
+#define B2R2BLT_ITSAQ3_LNA_Reached (0x100000)
+#define B2R2BLT_ITSAQ2_Node_Notif (0x80000)
+#define B2R2BLT_ITSAQ2_Node_repeat (0x40000)
+#define B2R2BLT_ITSAQ2_Stopped (0x20000)
+#define B2R2BLT_ITSAQ2_LNA_Reached (0x10000)
+#define B2R2BLT_ITSAQ1_Node_Notif (0x8000)
+#define B2R2BLT_ITSAQ1_Node_repeat (0x4000)
+#define B2R2BLT_ITSAQ1_Stopped (0x2000)
+#define B2R2BLT_ITSAQ1_LNA_Reached (0x1000)
+#define B2R2BLT_ITSCQ2_Repaced (0x80)
+#define B2R2BLT_ITSCQ2_Node_Notif (0x40)
+#define B2R2BLT_ITSCQ2_retriggered (0x20)
+#define B2R2BLT_ITSCQ2_completed (0x10)
+#define B2R2BLT_ITSCQ1_Repaced (0x8)
+#define B2R2BLT_ITSCQ1_Node_Notif (0x4)
+#define B2R2BLT_ITSCQ1_retriggered (0x2)
+#define B2R2BLT_ITSCQ1_completed (0x1)
+#define B2R2BLT_ITSMask (0x8ffff0ff)
+#define B2R2BLT_ITSTestMask (0x8ffff0ff)
+#define B2R2BLT_ITSInitialValue (0x0)
+#define B2R2BLT_ITSAccessType (INITIAL_TEST)
+#define B2R2BLT_ITS (0xa04)
+
+/* - BLT_STA1 */
+#define B2R2BLT_STA1BDISP_IDLE (0x1)
+#define B2R2BLT_STA1Mask (0x1)
+#define B2R2BLT_STA1TestMask (0x1)
+#define B2R2BLT_STA1InitialValue (0x1)
+#define B2R2BLT_STA1AccessType (INITIAL_TEST)
+#define B2R2BLT_STA1 (0xa08)
+
+/**
+ * b2r2_core - Quick link to administration data for B2R2
+ */
+static struct b2r2_core *b2r2_core[B2R2_MAX_NBR_DEVICES];
+
+/* Local functions */
+static void check_prio_list(struct b2r2_core *core, bool atomic);
+static void clear_interrupts(struct b2r2_core *core);
+static void trigger_job(struct b2r2_core *core, struct b2r2_core_job *job);
+static void exit_job_list(struct b2r2_core *core,
+ struct list_head *job_list);
+static void job_work_function(struct work_struct *ptr);
+static void init_job(struct b2r2_core_job *job);
+static void insert_into_prio_list(struct b2r2_core *core,
+ struct b2r2_core_job *job);
+static struct b2r2_core_job *find_job_in_list(int job_id,
+ struct list_head *list);
+static struct b2r2_core_job *find_job_in_active_jobs(struct b2r2_core *core,
+ int job_id);
+static struct b2r2_core_job *find_tag_in_list(struct b2r2_core *core,
+ int tag, struct list_head *list);
+static struct b2r2_core_job *find_tag_in_active_jobs(struct b2r2_core *core,
+ int tag);
+
+static int domain_enable(struct b2r2_core *core);
+static void domain_disable(struct b2r2_core *core);
+
+static void stop_queue(enum b2r2_core_queue queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+static void printk_regs(struct b2r2_core *core);
+static int hw_reset(struct b2r2_core *core);
+static void timeout_work_function(struct work_struct *ptr);
+#endif
+
+static void reset_hw_timer(struct b2r2_core_job *job);
+static void start_hw_timer(struct b2r2_core_job *job);
+static void stop_hw_timer(struct b2r2_core *core,
+ struct b2r2_core_job *job);
+
+static int init_hw(struct b2r2_core *core);
+static void exit_hw(struct b2r2_core *core);
+
+/* Tracking release bug... */
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+/**
+ * ar_add() - Adds an addref or a release to the array
+ *
+ * @core: The b2r2 core entity
+ * @job: The job that has been referenced
+ * @caller: The caller of addref / release
+ * @addref: true if it is an addref else false for release
+ */
+static void ar_add(struct b2r2_core *core, struct b2r2_core_job *job,
+ const char *caller, bool addref)
+{
+ core->ar[core->ar_write].addref = addref;
+ core->ar[core->ar_write].job = job;
+ core->ar[core->ar_write].caller = caller;
+ core->ar[core->ar_write].ref_count = job->ref_count;
+ core->ar_write = (core->ar_write + 1) %
+ ARRAY_SIZE(core->ar);
+ if (core->ar_write == core->ar_read)
+ core->ar_read = (core->ar_read + 1) %
+ ARRAY_SIZE(core->ar);
+}
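+
+/*
+ * Note: core->ar is a ring buffer. When ar_write catches up with
+ * ar_read, ar_read is pushed forward and the oldest record is dropped,
+ * so at most ARRAY_SIZE(core->ar) - 1 of the most recent addref/release
+ * records are retained.
+ */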
+
+/**
+ * sprintf_ar() - Writes all addref / release to a string buffer
+ *
+ * @core: The b2r2 core entity
+ * @buf: Receiving character buffer
+ * @job: Which job to write or NULL for all
+ *
+ * NOTE! No buffer size check!!
+ */
+static char *sprintf_ar(struct b2r2_core *core, char *buf,
+ struct b2r2_core_job *job)
+{
+ int i;
+ int size = 0;
+
+ for (i = core->ar_read; i != core->ar_write;
+ i = (i + 1) % ARRAY_SIZE(core->ar)) {
+ struct addref_release *ar = &core->ar[i];
+ if (!job || job == ar->job)
+ size += sprintf(buf + size,
+ "%s on %p from %s, ref = %d\n",
+ ar->addref ? "addref" : "release",
+ ar->job, ar->caller, ar->ref_count);
+ }
+
+ return buf;
+}
+
+/**
+ * printk_ar() - Writes all addref / release using dev_info
+ *
+ * @core: The b2r2 core entity
+ * @job: Which job to write or NULL for all
+ */
+static void printk_ar(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+ int i;
+
+ for (i = core->ar_read; i != core->ar_write;
+ i = (i + 1) % ARRAY_SIZE(core->ar)) {
+ struct addref_release *ar = &core->ar[i];
+ if (!job || job == ar->job)
+ b2r2_log_info(core->dev, "%s on %p from %s,"
+ " ref = %d\n",
+ ar->addref ? "addref" : "release",
+ ar->job, ar->caller, ar->ref_count);
+ }
+}
+#endif
+
+/**
+ * internal_job_addref() - Increments the reference count for a job
+ *
+ * @core: The b2r2 core entity
+ * @job: Which job to increment reference count for
+ * @caller: Name of function calling addref (for debug)
+ *
+ * Note that core->lock _must_ be held
+ */
+static void internal_job_addref(struct b2r2_core *core,
+ struct b2r2_core_job *job, const char *caller)
+{
+ u32 ref_count;
+
+ /* Sanity checks */
+ BUG_ON(core == NULL);
+ BUG_ON(job == NULL);
+
+ b2r2_log_info(core->dev, "%s (core: %p, job: %p) (from %s)\n",
+ __func__, core, job, caller);
+
+
+ if (job->start_sentinel != START_SENTINEL ||
+ job->end_sentinel != END_SENTINEL ||
+ job->ref_count == 0 || job->ref_count > 10) {
+ b2r2_log_info(core->dev, "%s: (core: %p, job: %p) "
+ "start=%X end=%X ref_count=%d\n",
+ __func__, core, job, job->start_sentinel,
+ job->end_sentinel, job->ref_count);
+
+ /* Something is wrong, print the addref / release array */
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ printk_ar(core, NULL);
+#endif
+ }
+
+
+ BUG_ON(job->start_sentinel != START_SENTINEL);
+ BUG_ON(job->end_sentinel != END_SENTINEL);
+
+ /* Do the actual reference count increment */
+ ref_count = ++job->ref_count;
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ /* Keep track of addref / release */
+ ar_add(core, job, caller, true);
+#endif
+
+ b2r2_log_info(core->dev, "%s called from %s (core: %p, job: %p): Ref "
+ "Count is %d\n", __func__, caller, core, job, job->ref_count);
+}
+
+/**
+ * internal_job_release() - Decrements the reference count for a job
+ *
+ * @core: The b2r2 core entity
+ * @job: Which job to decrement reference count for
+ * @caller: Name of function calling release (for debug)
+ *
+ * Returns true if job_release should be called by caller
+ * (reference count reached zero).
+ *
+ * Note that core->lock _must_ be held
+ */
+static bool internal_job_release(struct b2r2_core *core,
+ struct b2r2_core_job *job, const char *caller)
+{
+ u32 ref_count;
+ bool call_release = false;
+
+ /* Sanity checks */
+ BUG_ON(job == NULL);
+
+ b2r2_log_info(core->dev, "%s (core: %p, job: %p) (from %s)\n",
+ __func__, core, job, caller);
+
+ if (job->start_sentinel != START_SENTINEL ||
+ job->end_sentinel != END_SENTINEL ||
+ job->ref_count == 0 || job->ref_count > 10) {
+ b2r2_log_info(core->dev, "%s: (core: %p, job: %p) start=%X "
+ "end=%X ref_count=%d\n", __func__, core, job,
+ job->start_sentinel, job->end_sentinel,
+ job->ref_count);
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ printk_ar(core, NULL);
+#endif
+ }
+
+ BUG_ON(job->start_sentinel != START_SENTINEL);
+ BUG_ON(job->end_sentinel != END_SENTINEL);
+ BUG_ON(job->ref_count == 0 || job->ref_count > 10);
+
+ /* Do the actual decrement */
+ ref_count = --job->ref_count;
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ ar_add(core, job, caller, false);
+#endif
+ b2r2_log_info(core->dev, "%s called from %s (core: %p, job: %p) "
+ "Ref Count is %d\n", __func__, caller, core, job, ref_count);
+
+ if (!ref_count && job->release) {
+ call_release = true;
+ /* Job will now cease to exist */
+ job->start_sentinel = 0xFFFFFFFF;
+ job->end_sentinel = 0xFFFFFFFF;
+ }
+ return call_release;
+}
+
+
+
+/* Exported functions */
+
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+void b2r2_core_job_addref(struct b2r2_core_job *job, const char *caller)
+{
+ unsigned long flags;
+ struct b2r2_core *core;
+
+ BUG_ON(job == NULL || job->data == 0);
+ core = (struct b2r2_core *) job->data;
+
+ spin_lock_irqsave(&core->lock, flags);
+ internal_job_addref(core, job, caller);
+ spin_unlock_irqrestore(&core->lock, flags);
+}
+
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+void b2r2_core_job_release(struct b2r2_core_job *job, const char *caller)
+{
+ unsigned long flags;
+ bool call_release = false;
+ struct b2r2_core *core;
+
+ BUG_ON(job == NULL || job->data == 0);
+ core = (struct b2r2_core *) job->data;
+
+ spin_lock_irqsave(&core->lock, flags);
+ call_release = internal_job_release(core, job, caller);
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ if (call_release)
+ job->release(job);
+}
+
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+int b2r2_core_job_add(struct b2r2_control *control,
+ struct b2r2_core_job *job)
+{
+ unsigned long flags;
+ struct b2r2_core *core = control->data;
+
+ b2r2_log_info(core->dev, "%s (core: %p, job: %p)\n",
+ __func__, core, job);
+
+ /* Enable B2R2 */
+ domain_enable(core);
+
+ spin_lock_irqsave(&core->lock, flags);
+ /* Check that we have not been powered down */
+ if (!core->domain_enabled) {
+ spin_unlock_irqrestore(&core->lock, flags);
+ return -ENOSYS;
+ }
+
+ core->stat_n_jobs_added++;
+
+ /* Initialise internal job data */
+ init_job(job);
+
+ /* Initial reference, should be released by caller of this function */
+ job->ref_count = 1;
+
+ /* Insert job into prio list */
+ insert_into_prio_list(core, job);
+
+ /* Check if we can dispatch job */
+ check_prio_list(core, false);
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return job->job_id;
+}
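+
+/*
+ * Sketch of the expected caller-side job lifecycle (illustrative only;
+ * "job" is a hypothetical, fully filled-in b2r2_core_job):
+ *
+ *	job_id = b2r2_core_job_add(control, job);
+ *	if (job_id >= 0) {
+ *		b2r2_core_job_wait(job);
+ *		b2r2_core_job_release(job, __func__);
+ *	}
+ *
+ * The release drops the initial reference taken by b2r2_core_job_add().
+ */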
+
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+struct b2r2_core_job *b2r2_core_job_find(struct b2r2_control *control,
+ int job_id)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job;
+ struct b2r2_core *core = control->data;
+
+ b2r2_log_info(core->dev, "%s (core: %p, job_id: %d)\n",
+ __func__, core, job_id);
+
+ spin_lock_irqsave(&core->lock, flags);
+ /* Look through prio queue */
+ job = find_job_in_list(job_id, &core->prio_queue);
+
+ if (!job)
+ job = find_job_in_active_jobs(core, job_id);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return job;
+}
+
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+struct b2r2_core_job *b2r2_core_job_find_first_with_tag(
+ struct b2r2_control *control, int tag)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job;
+ struct b2r2_core *core = control->data;
+
+ b2r2_log_info(core->dev,
+ "%s (core: %p, tag: %d)\n", __func__, core, tag);
+
+ spin_lock_irqsave(&core->lock, flags);
+ /* Look through prio queue */
+ job = find_tag_in_list(core, tag, &core->prio_queue);
+
+ if (!job)
+ job = find_tag_in_active_jobs(core, tag);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return job;
+}
+
+/**
+ * is_job_done() - Spin lock protected check if job is done
+ *
+ * @job: Job to check
+ *
+ * Returns true if job is done or cancelled
+ *
+ * core->lock must _NOT_ be held when calling this function
+ */
+static bool is_job_done(struct b2r2_core_job *job)
+{
+ unsigned long flags;
+ bool job_is_done;
+ struct b2r2_core *core = (struct b2r2_core *) job->data;
+
+ spin_lock_irqsave(&core->lock, flags);
+ job_is_done =
+ job->job_state != B2R2_CORE_JOB_QUEUED &&
+ job->job_state != B2R2_CORE_JOB_RUNNING;
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return job_is_done;
+}
+
+/**
+ * b2r2_core_job_wait()
+ *
+ * @job:
+ *
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+int b2r2_core_job_wait(struct b2r2_core_job *job)
+{
+ int ret = 0;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_core *core = (struct b2r2_core *) job->data;
+#endif
+
+ b2r2_log_info(core->dev, "%s (core: %p, job: %p)\n",
+ __func__, core, job);
+ /* Check that we have the job */
+ if (job->job_state == B2R2_CORE_JOB_IDLE) {
+ /* Never or not queued */
+ b2r2_log_info(core->dev, "%s: Job not queued\n", __func__);
+ return -ENOENT;
+ }
+
+ /* Wait for the job to be done */
+ ret = wait_event_interruptible(
+ job->event,
+ is_job_done(job));
+
+ if (ret)
+ b2r2_log_warn(core->dev,
+ "%s: wait_event_interruptible returns %d state is %d",
+ __func__, ret, job->job_state);
+ return ret;
+}
+
+/**
+ * cancel_job() - Cancels a job (removes it from prio list or active jobs) and
+ * calls the job callback
+ *
+ * @job: Job to cancel
+ *
+ * Returns true if the job was found and cancelled
+ *
+ * core->lock must be held when calling this function
+ */
+static bool cancel_job(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+ bool found_job = false;
+ bool job_was_active = false;
+
+ /* Remove from prio list */
+ if (job->job_state == B2R2_CORE_JOB_QUEUED) {
+ list_del_init(&job->list);
+ found_job = true;
+ }
+
+ /* Remove from active jobs */
+ if (!found_job && core->n_active_jobs > 0) {
+ int i;
+
+ /* Look for the job among the active jobs */
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ if (core->active_jobs[i] == job) {
+ stop_queue((enum b2r2_core_queue)i);
+ stop_hw_timer(core, job);
+ core->active_jobs[i] = NULL;
+ core->n_active_jobs--;
+ found_job = true;
+ job_was_active = true;
+ }
+ }
+ }
+
+ /* Handle done list & callback */
+ if (found_job) {
+ /* Job is canceled */
+ job->job_state = B2R2_CORE_JOB_CANCELED;
+
+ queue_work(core->work_queue, &job->work);
+
+ /* Statistics */
+ if (!job_was_active)
+ core->stat_n_jobs_in_prio_list--;
+
+ }
+
+ return found_job;
+}
+
+/* core->lock _must_ _NOT_ be held when calling this function */
+int b2r2_core_job_cancel(struct b2r2_core_job *job)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct b2r2_core *core = (struct b2r2_core *) job->data;
+
+ b2r2_log_info(core->dev, "%s (core: %p, job: %p) (st: %d)\n",
+ __func__, core, job, job->job_state);
+ /* Check that we have the job */
+ if (job->job_state == B2R2_CORE_JOB_IDLE) {
+ /* Never or not queued */
+ b2r2_log_info(core->dev, "%s: Job not queued\n", __func__);
+ return -ENOENT;
+ }
+
+ /* Remove from prio list */
+ spin_lock_irqsave(&core->lock, flags);
+ cancel_job(core, job);
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return ret;
+}
+
+/* LOCAL FUNCTIONS BELOW */
+
+/**
+ * domain_disable_work_function()
+ *
+ * @core: The b2r2 core entity
+ */
+static void domain_disable_work_function(struct work_struct *work)
+{
+ struct delayed_work *twork = to_delayed_work(work);
+ struct b2r2_core *core = container_of(
+ twork, struct b2r2_core, domain_disable_work);
+
+ if (!mutex_trylock(&core->domain_lock))
+ return;
+
+ if (core->domain_request_count == 0) {
+ core->valid = false;
+ exit_hw(core);
+ clk_disable(core->b2r2_clock);
+ regulator_disable(core->b2r2_reg);
+ core->domain_enabled = false;
+ }
+
+ mutex_unlock(&core->domain_lock);
+}
+
+/**
+ * domain_enable()
+ *
+ * @core: The b2r2 core entity
+ */
+static int domain_enable(struct b2r2_core *core)
+{
+ mutex_lock(&core->domain_lock);
+ core->domain_request_count++;
+
+ if (!core->domain_enabled) {
+ int retry = 0;
+ int ret;
+again:
+ /*
+ * regulator_enable() may sleep and can return -EAGAIN if it is
+ * interrupted, so the enable is retried a bounded number of
+ * times (B2R2_REGULATOR_RETRY_COUNT).
+ */
+ ret = regulator_enable(core->b2r2_reg);
+ if ((ret == -EAGAIN) &&
+ ((retry++) < B2R2_REGULATOR_RETRY_COUNT))
+ goto again;
+ else if (ret < 0)
+ goto regulator_enable_failed;
+
+ ret = clk_enable(core->b2r2_clock);
+ if (ret < 0) {
+ b2r2_log_err(core->dev,
+ "%s: Could not enable clock\n", __func__);
+ goto enable_clk_failed;
+ }
+ if (init_hw(core) < 0)
+ goto init_hw_failed;
+ core->domain_enabled = true;
+ core->valid = true;
+ }
+
+ mutex_unlock(&core->domain_lock);
+
+ return 0;
+
+init_hw_failed:
+ b2r2_log_err(core->dev,
+ "%s: Could not initialize hardware!\n", __func__);
+ clk_disable(core->b2r2_clock);
+
+enable_clk_failed:
+ if (regulator_disable(core->b2r2_reg) < 0)
+ b2r2_log_err(core->dev, "%s: regulator_disable failed!\n",
+ __func__);
+
+regulator_enable_failed:
+ core->domain_request_count--;
+ mutex_unlock(&core->domain_lock);
+
+ return -EFAULT;
+}
+
+/**
+ * domain_disable()
+ *
+ * @core: The b2r2 core entity
+ */
+static void domain_disable(struct b2r2_core *core)
+{
+ mutex_lock(&core->domain_lock);
+
+ if (core->domain_request_count == 0) {
+ b2r2_log_err(core->dev,
+ "%s: Unbalanced domain_disable()\n", __func__);
+ } else {
+ core->domain_request_count--;
+
+ /* Cancel any existing work */
+ cancel_delayed_work_sync(&core->domain_disable_work);
+
+ /* Add a work to disable the power and clock after a delay */
+ queue_delayed_work(core->work_queue, &core->domain_disable_work,
+ B2R2_DOMAIN_DISABLE_TIMEOUT);
+ }
+
+ mutex_unlock(&core->domain_lock);
+}
+
+/**
+ * stop_queue() - Stops the specified queue.
+ */
+static void stop_queue(enum b2r2_core_queue queue)
+{
+ /*
+ * TODO: Implement! If this function is not implemented, canceled
+ * jobs will still occupy B2R2, which is a waste of resources. Not
+ * stopping jobs also skews the hardware timing: the job that the
+ * canceled job interrupted (if any) will be billed for the time
+ * between the point where the job is canceled and the point where
+ * it actually stops.
+ */
+}
+
+/**
+ * exit_job_list() - Empties a job queue by canceling the jobs
+ *
+ * @core: The b2r2 core entity
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void exit_job_list(struct b2r2_core *core,
+ struct list_head *job_queue)
+{
+ while (!list_empty(job_queue)) {
+ struct b2r2_core_job *job =
+ list_entry(job_queue->next,
+ struct b2r2_core_job,
+ list);
+ /* Add reference to prevent job from disappearing
+ in the middle of our work, released below */
+ internal_job_addref(core, job, __func__);
+
+ cancel_job(core, job);
+
+ /* Matching release to addref above */
+ internal_job_release(core, job, __func__);
+
+ }
+}
+
+/**
+ * job_work_function() - Work queue function that calls callback(s) and
+ * checks if B2R2 can accept a new job
+ *
+ * @ptr: Pointer to work struct (embedded in struct b2r2_core_job)
+ */
+static void job_work_function(struct work_struct *ptr)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job =
+ container_of(ptr, struct b2r2_core_job, work);
+ struct b2r2_core *core = (struct b2r2_core *) job->data;
+
+ /* Disable B2R2 */
+ domain_disable(core);
+
+ /* Release resources */
+ if (job->release_resources)
+ job->release_resources(job, false);
+
+ spin_lock_irqsave(&core->lock, flags);
+
+ /* Dispatch a new job if possible */
+ check_prio_list(core, false);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ /* Tell the client */
+ if (job->callback)
+ job->callback(job);
+
+ /* Drop our reference, matches the
+ addref in handle_queue_event or b2r2_core_job_cancel */
+ b2r2_core_job_release(job, __func__);
+}
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * timeout_work_function() - Work queue function that checks for
+ *                           timed-out jobs. B2R2 might silently refuse
+ *                           to execute some jobs, e.g. SRC2 fill
+ *
+ * @ptr: Pointer to work struct (embedded in struct b2r2_core)
+ *
+ */
+static void timeout_work_function(struct work_struct *ptr)
+{
+ unsigned long flags;
+ struct list_head job_list;
+ struct delayed_work *twork = to_delayed_work(ptr);
+ struct b2r2_core *core = container_of(twork, struct b2r2_core,
+ timeout_work);
+
+ INIT_LIST_HEAD(&job_list);
+
+ /* Cancel all jobs if too much time has passed since the last irq */
+ spin_lock_irqsave(&core->lock, flags);
+ if (core->n_active_jobs > 0) {
+ unsigned long diff =
+ (long) jiffies - (long) core->jiffies_last_irq;
+ if (diff > JOB_TIMEOUT) {
+ /* Active jobs and more than a second since last irq! */
+ int i;
+
+ b2r2_core_print_stats(core);
+
+ /* Look for timed-out jobs and put them in a tmp list.
+ * It's important that the application queues are
+ * killed in order of decreasing priority */
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ struct b2r2_core_job *job =
+ core->active_jobs[i];
+
+ if (job) {
+ stop_hw_timer(core, job);
+ core->active_jobs[i] = NULL;
+ core->n_active_jobs--;
+ list_add_tail(&job->list, &job_list);
+ }
+ }
+
+ /* Print the B2R2 register and reset B2R2 */
+ printk_regs(core);
+ hw_reset(core);
+ }
+ }
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ /* Handle timed-out jobs */
+ spin_lock_irqsave(&core->lock, flags);
+ while (!list_empty(&job_list)) {
+ struct b2r2_core_job *job =
+ list_entry(job_list.next,
+ struct b2r2_core_job,
+ list);
+
+ b2r2_log_warn(core->dev, "%s: Job timeout\n", __func__);
+
+ list_del_init(&job->list);
+
+ /* Job is cancelled */
+ job->job_state = B2R2_CORE_JOB_CANCELED;
+
+ /* Handle done */
+ wake_up_interruptible(&job->event);
+
+ /* Job callbacks handled via work queue */
+ queue_work(core->work_queue, &job->work);
+ }
+
+ /* Requeue delayed work */
+ if (core->n_active_jobs)
+ queue_delayed_work(
+ core->work_queue,
+ &core->timeout_work, JOB_TIMEOUT);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+}
+#endif
+
+/**
+ * reset_hw_timer() - Resets a job's hardware timer. Must be called before
+ * the timer is used.
+ *
+ * @job: Pointer to job struct
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void reset_hw_timer(struct b2r2_core_job *job)
+{
+ job->nsec_active_in_hw = 0;
+}
+
+/**
+ * start_hw_timer() - Times how long a job spends in hardware (active).
+ *                    Should be called immediately before starting the
+ * hardware.
+ *
+ * @job: Pointer to job struct
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void start_hw_timer(struct b2r2_core_job *job)
+{
+ job->hw_start_time = b2r2_get_curr_nsec();
+}
+
+/**
+ * stop_hw_timer() - Times how long a job spends in hardware (active).
+ *                   Should be called immediately after the hardware has
+ * finished.
+ *
+ * @core: The b2r2 core entity
+ * @job: Pointer to job struct
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void stop_hw_timer(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+ /*
+ * Assumes only app queues are used, which is the case right now.
+ *
+ * Not 100% accurate: when a higher prio job interrupts a lower prio
+ * job, it does so only after the current node of the low prio job
+ * has finished. Currently we cannot sense when the actual switch
+ * takes place, so the time reported for a job that interrupts a
+ * lower prio job will on average include the time it takes to
+ * process half a node of the lower prio job in addition to the time
+ * it takes to process the job's own nodes. This could possibly be
+ * solved by adding node notifications, but that would involve a
+ * significant amount of work and consume system resources due to
+ * the extra interrupts.
+ *
+ * If a job takes more than ~2 s (absolute time, including idling in
+ * the hardware) the state of the hardware timer will be corrupted,
+ * and it will not report valid values until B2R2 becomes idle (no
+ * active jobs on any queues). The maximum length could possibly be
+ * increased by using 64-bit integers.
+ */
+
+ int i;
+
+ u32 stop_time_raw = b2r2_get_curr_nsec();
+ /*
+ * We'll add an offset to all positions in time to make the current
+ * time equal to 0xFFFFFFFF. This way we can compare positions in
+ * time to each other without having to worry about wrapping (so
+ * long as all positions in time are in the past).
+ */
+ u32 stop_time = 0xFFFFFFFF;
+ u32 time_pos_offset = 0xFFFFFFFF - stop_time_raw;
+ u32 nsec_in_hw = stop_time - (job->hw_start_time + time_pos_offset);
+ job->nsec_active_in_hw += (s32)nsec_in_hw;
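+
+ /*
+ * Worked example (hypothetical values): if the counter wrapped so
+ * that job->hw_start_time == 0xFFFFFFFA and stop_time_raw == 100,
+ * then time_pos_offset == 0xFFFFFFFF - 100, hw_start_time +
+ * time_pos_offset wraps to 0xFFFFFF95 and nsec_in_hw ==
+ * 0xFFFFFFFF - 0xFFFFFF95 == 106, matching the 6 ns before the
+ * wrap plus the 100 ns after it.
+ */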
+
+ /*
+ * Check if we have delayed the start of higher prio jobs. Can
+ * happen as queue switching can only be done between nodes.
+ */
+ for (i = (int)job->queue - 1; i >= (int)B2R2_CORE_QUEUE_AQ1; i--) {
+ struct b2r2_core_job *queue_active_job = core->active_jobs[i];
+ if (NULL == queue_active_job)
+ continue;
+
+ queue_active_job->hw_start_time = stop_time_raw;
+ }
+
+ /* Check if the job has stolen time from lower prio jobs */
+ for (i = (int)job->queue + 1; i < B2R2_NUM_APPLICATIONS_QUEUES; i++) {
+ struct b2r2_core_job *queue_active_job = core->active_jobs[i];
+ u32 queue_active_job_hw_start_time;
+
+ if (NULL == queue_active_job)
+ continue;
+
+ queue_active_job_hw_start_time =
+ queue_active_job->hw_start_time +
+ time_pos_offset;
+
+ if (queue_active_job_hw_start_time < stop_time) {
+ u32 queue_active_job_nsec_in_hw = stop_time -
+ queue_active_job_hw_start_time;
+ u32 num_stolen_nsec = min(queue_active_job_nsec_in_hw,
+ nsec_in_hw);
+
+ queue_active_job->nsec_active_in_hw -= (s32)num_stolen_nsec;
+
+ nsec_in_hw -= num_stolen_nsec;
+ stop_time -= num_stolen_nsec;
+ }
+
+ if (0 == nsec_in_hw)
+ break;
+ }
+}
+
+/**
+ * init_job() - Initializes a job structure from filled in client data.
+ * Reference count will be set to 1
+ *
+ * @job: Job to initialize
+ */
+static void init_job(struct b2r2_core_job *job)
+{
+
+ job->start_sentinel = START_SENTINEL;
+ job->end_sentinel = END_SENTINEL;
+
+ /* Job is idle, never queued */
+ job->job_state = B2R2_CORE_JOB_IDLE;
+
+ /* Initialize internal data */
+ INIT_LIST_HEAD(&job->list);
+ init_waitqueue_head(&job->event);
+ INIT_WORK(&job->work, job_work_function);
+
+ /* Map given prio to B2R2 queues */
+ if (job->prio < B2R2_CORE_LOWEST_PRIO)
+ job->prio = B2R2_CORE_LOWEST_PRIO;
+ else if (job->prio > B2R2_CORE_HIGHEST_PRIO)
+ job->prio = B2R2_CORE_HIGHEST_PRIO;
+
+ if (job->prio > 10) {
+ job->queue = B2R2_CORE_QUEUE_AQ1;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ1_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_3);
+ } else if (job->prio > 0) {
+ job->queue = B2R2_CORE_QUEUE_AQ2;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ2_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_2);
+ } else if (job->prio > -10) {
+ job->queue = B2R2_CORE_QUEUE_AQ3;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ3_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_1);
+ } else {
+ job->queue = B2R2_CORE_QUEUE_AQ4;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ4_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_0);
+ }
+}
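+
+/*
+ * Summary of the mapping above (clamped prio -> B2R2 queue/priority):
+ *
+ *	prio >  10          ->  AQ1, B2R2_AQ_PRIOR_3
+ *	  0 <  prio <= 10   ->  AQ2, B2R2_AQ_PRIOR_2
+ *	-10 <  prio <= 0    ->  AQ3, B2R2_AQ_PRIOR_1
+ *	       prio <= -10  ->  AQ4, B2R2_AQ_PRIOR_0
+ */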
+
+/**
+ * clear_interrupts() - Disables all interrupts
+ *
+ * core->lock _must_ be held
+ */
+static void clear_interrupts(struct b2r2_core *core)
+{
+ writel(0x0, &core->hw->BLT_ITM0);
+ writel(0x0, &core->hw->BLT_ITM1);
+ writel(0x0, &core->hw->BLT_ITM2);
+ writel(0x0, &core->hw->BLT_ITM3);
+}
+
+/**
+ * insert_into_prio_list() - Inserts the job into the sorted list of jobs.
+ * The list is sorted by priority.
+ *
+ * @core: The b2r2 core entity
+ * @job: Job to insert
+ *
+ * core->lock _must_ be held
+ */
+static void insert_into_prio_list(struct b2r2_core *core,
+ struct b2r2_core_job *job)
+{
+ /* Ref count is increased when job put in list,
+ should be released when job is removed from list */
+ internal_job_addref(core, job, __func__);
+
+ core->stat_n_jobs_in_prio_list++;
+
+ /* Sort in the job */
+ if (list_empty(&core->prio_queue))
+ list_add_tail(&job->list, &core->prio_queue);
+ else {
+ struct b2r2_core_job *first_job = list_entry(
+ core->prio_queue.next,
+ struct b2r2_core_job, list);
+ struct b2r2_core_job *last_job = list_entry(
+ core->prio_queue.prev,
+ struct b2r2_core_job, list);
+
+ if (job->prio > first_job->prio)
+ list_add(&job->list, &core->prio_queue);
+ else if (job->prio <= last_job->prio)
+ list_add_tail(&job->list, &core->prio_queue);
+ else {
+ /* We need to find where to put it */
+ struct list_head *ptr;
+
+ list_for_each(ptr, &core->prio_queue) {
+ struct b2r2_core_job *list_job =
+ list_entry(ptr, struct b2r2_core_job,
+ list);
+ if (job->prio > list_job->prio) {
+ list_add_tail(&job->list,
+ &list_job->list);
+ break;
+ }
+ }
+ }
+ }
+ /* The job is now queued */
+ job->job_state = B2R2_CORE_JOB_QUEUED;
+}
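+
+/*
+ * Ordering note: the "<=" comparison against the list tail makes
+ * insertion FIFO among jobs of equal priority. E.g. (hypothetical
+ * prios) with the queue holding prios 10, 5, 5, a new prio-5 job is
+ * placed after both existing prio-5 jobs.
+ */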
+
+/**
+ * check_prio_list() - Checks if the first job(s) in the prio list can
+ * be dispatched to B2R2
+ *
+ * @core: The b2r2 core entity
+ * @atomic: true if in atomic context (i.e. interrupt context)
+ *
+ * core->lock _must_ be held
+ */
+static void check_prio_list(struct b2r2_core *core, bool atomic)
+{
+ bool dispatched_job;
+ int n_dispatched = 0;
+ struct b2r2_core_job *job;
+
+ do {
+ dispatched_job = false;
+
+ /* Do we have anything in our prio list? */
+ if (list_empty(&core->prio_queue))
+ break;
+
+ /* The first job waiting */
+ job = list_first_entry(&core->prio_queue,
+ struct b2r2_core_job, list);
+
+ /* Is the B2R2 queue available? */
+ if (core->active_jobs[job->queue] != NULL)
+ break;
+
+ /* Can we acquire resources? */
+ if (!job->acquire_resources ||
+ job->acquire_resources(job, atomic) == 0) {
+ /* Ok to dispatch job */
+
+ /* Remove from list */
+ list_del_init(&job->list);
+
+ /* The job is now active */
+ core->active_jobs[job->queue] = job;
+ core->n_active_jobs++;
+ job->jiffies = jiffies;
+ core->jiffies_last_active = jiffies;
+
+ /* Kick off B2R2 */
+ trigger_job(core, job);
+ dispatched_job = true;
+ n_dispatched++;
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ /* Check in one half second if it hangs */
+ queue_delayed_work(core->work_queue,
+ &core->timeout_work, JOB_TIMEOUT);
+#endif
+ } else {
+ /* No resources */
+ if (!atomic && core->n_active_jobs == 0) {
+ b2r2_log_warn(core->dev,
+ "%s: No resource", __func__);
+ cancel_job(core, job);
+ }
+ }
+ } while (dispatched_job);
+
+ core->stat_n_jobs_in_prio_list -= n_dispatched;
+}
+
+/**
+ * find_job_in_list() - Finds job with job_id in list
+ *
+ * @jobid: Job id to find
+ * @list: List to find job id in
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock _must_ be held
+ */
+static struct b2r2_core_job *find_job_in_list(int job_id,
+ struct list_head *list)
+{
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ struct b2r2_core_job *job = list_entry(
+ ptr, struct b2r2_core_job, list);
+ if (job->job_id == job_id) {
+ struct b2r2_core *core = (struct b2r2_core *) job->data;
+ /* Increase reference count, should be released by
+ the caller of b2r2_core_job_find */
+ internal_job_addref(core, job, __func__);
+ return job;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * find_job_in_active_jobs() - Finds job in active job queues
+ *
+ * @core: The b2r2 core entity
+ * @job_id: Job id to find
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock _must_ be held
+ */
+static struct b2r2_core_job *find_job_in_active_jobs(struct b2r2_core *core,
+ int job_id)
+{
+ int i;
+ struct b2r2_core_job *found_job = NULL;
+
+ if (core->n_active_jobs) {
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ struct b2r2_core_job *job = core->active_jobs[i];
+
+ if (job && job->job_id == job_id) {
+ internal_job_addref(core, job, __func__);
+ found_job = job;
+ break;
+ }
+ }
+ }
+ return found_job;
+}
+
+/**
+ * find_tag_in_list() - Finds first job with tag in list
+ *
+ * @tag: Tag to find
+ * @list: List to find job id in
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock must be held
+ */
+static struct b2r2_core_job *find_tag_in_list(struct b2r2_core *core,
+ int tag, struct list_head *list)
+{
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ struct b2r2_core_job *job =
+ list_entry(ptr, struct b2r2_core_job, list);
+ if (job->tag == tag) {
+ /* Increase reference count, should be released by
+ the caller of b2r2_core_job_find */
+ internal_job_addref(core, job, __func__);
+ return job;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * find_tag_in_active_jobs() - Finds job with tag in active job queues
+ *
+ * @tag: Tag to find
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock must be held
+ */
+static struct b2r2_core_job *find_tag_in_active_jobs(struct b2r2_core *core,
+ int tag)
+{
+ int i;
+ struct b2r2_core_job *found_job = NULL;
+
+ if (core->n_active_jobs) {
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ struct b2r2_core_job *job = core->active_jobs[i];
+
+ if (job && job->tag == tag) {
+ internal_job_addref(core, job, __func__);
+ found_job = job;
+ break;
+ }
+ }
+ }
+ return found_job;
+}
+
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * hw_reset() - Resets B2R2 hardware
+ *
+ * core->lock must be held
+ */
+static int hw_reset(struct b2r2_core *core)
+{
+ u32 uTimeOut = B2R2_RESET_TIMEOUT_VALUE;
+
+ /* Tell B2R2 to reset */
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
+ writel(0x00000000, &core->hw->BLT_CTL);
+
+ b2r2_log_info(core->dev, "wait for B2R2 to be idle..\n");
+
+ /* Wait for B2R2 to become idle (bounded by a timeout counter) */
+ while ((uTimeOut > 0) &&
+ ((readl(&core->hw->BLT_STA1) &
+ B2R2BLT_STA1BDISP_IDLE) == 0x0))
+ uTimeOut--;
+
+ if (uTimeOut == 0) {
+ b2r2_log_warn(core->dev,
+ "error-> after software reset B2R2 is not idle\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+
+}
+#endif
+
+/**
+ * trigger_job() - Put job in B2R2 HW queue
+ *
+ * @job: Job to trigger
+ *
+ * core->lock must be held
+ */
+static void trigger_job(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+ /* Debug prints */
+ b2r2_log_info(core->dev, "queue 0x%x\n", job->queue);
+ b2r2_log_info(core->dev, "BLT TRIG_IP 0x%x (first node)\n",
+ job->first_node_address);
+ b2r2_log_info(core->dev, "BLT LNA_CTL 0x%x (last node)\n",
+ job->last_node_address);
+ b2r2_log_info(core->dev, "BLT TRIG_CTL 0x%x\n", job->control);
+ b2r2_log_info(core->dev, "BLT PACE_CTL 0x%x\n", job->pace_control);
+
+ reset_hw_timer(job);
+ job->job_state = B2R2_CORE_JOB_RUNNING;
+
+ /* Enable interrupt */
+ writel(readl(&core->hw->BLT_ITM0) | job->interrupt_context,
+ &core->hw->BLT_ITM0);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS1_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS1_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS1_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS1_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS2_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS2_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS2_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS2_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS3_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS3_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS3_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS3_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGT_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGT_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGT_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGT_PGZ);
+
+ /* B2R2 kicks off when LNA is written, LNA write must be last! */
+ switch (job->queue) {
+ case B2R2_CORE_QUEUE_CQ1:
+ writel(job->first_node_address, &core->hw->BLT_CQ1_TRIG_IP);
+ writel(job->control, &core->hw->BLT_CQ1_TRIG_CTL);
+ writel(job->pace_control, &core->hw->BLT_CQ1_PACE_CTL);
+ break;
+
+ case B2R2_CORE_QUEUE_CQ2:
+ writel(job->first_node_address, &core->hw->BLT_CQ2_TRIG_IP);
+ writel(job->control, &core->hw->BLT_CQ2_TRIG_CTL);
+ writel(job->pace_control, &core->hw->BLT_CQ2_PACE_CTL);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ1:
+ writel(job->control, &core->hw->BLT_AQ1_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ1_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ1_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ2:
+ writel(job->control, &core->hw->BLT_AQ2_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ2_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ2_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ3:
+ writel(job->control, &core->hw->BLT_AQ3_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ3_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ3_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ4:
+ writel(job->control, &core->hw->BLT_AQ4_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ4_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ4_LNA);
+ break;
+
+	default:
+		/* Unknown queue, nothing to trigger */
+		break;
+	}
+
+}
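+
+/*
+ * Note: for the application queues (AQ1-AQ4), B2R2 starts executing
+ * the job when the LNA register is written; the wmb() above orders
+ * the CTL/IP writes before the final, triggering LNA write.
+ */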
+
+/**
+ * handle_queue_event() - Handles an interrupt event for the specified
+ * B2R2 queue
+ *
+ * @core: The b2r2 core entity
+ * @queue: Queue to handle the event for
+ *
+ * core->lock must be held
+ */
+static void handle_queue_event(struct b2r2_core *core,
+ enum b2r2_core_queue queue)
+{
+	struct b2r2_core_job *job;
+
+	job = core->active_jobs[queue];
+	if (!job) {
+		/* No job, spurious interrupt? */
+		b2r2_log_warn(core->dev, "%s: No job\n", __func__);
+		return;
+	}
+
+	if (job->job_state != B2R2_CORE_JOB_RUNNING)
+		/* Severe error; the job on an interrupting queue
+		   should be in the running state */
+		b2r2_log_warn(core->dev,
+			"%s: Job is not running\n", __func__);
+
+	stop_hw_timer(core, job);
+
+	/* Remove the job from the queue */
+	BUG_ON(core->n_active_jobs == 0);
+	core->active_jobs[queue] = NULL;
+	core->n_active_jobs--;
+
+	/* Release resources from atomic context; release_resources()
+	   will be called again later from process context (work queue) */
+	if (job->release_resources)
+		job->release_resources(job, true);
+
+ /* Job is done */
+ job->job_state = B2R2_CORE_JOB_DONE;
+
+ /* Handle done */
+ wake_up_interruptible(&job->event);
+
+ /* Dispatch to work queue to handle callbacks */
+ queue_work(core->work_queue, &job->work);
+}
+
+/**
+ * process_events() - Handles interrupt events
+ *
+ * @core: The b2r2 core entity
+ * @status: Contents of the B2R2 ITS register
+ */
+static void process_events(struct b2r2_core *core, u32 status)
+{
+ u32 mask = 0xF;
+ u32 disable_itm_mask = 0;
+
+ b2r2_log_info(core->dev, "Enters process_events\n");
+ b2r2_log_info(core->dev, "status 0x%x\n", status);
+
+ /* Composition queue 1 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_CQ1);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Composition queue 2 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_CQ2);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 8;
+
+ /* Application queue 1 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ1);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 2 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ2);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 3 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ3);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 4 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ4);
+ disable_itm_mask |= mask;
+ }
+
+ /* Clear received interrupt flags */
+ writel(status, &core->hw->BLT_ITS);
+ /* Disable handled interrupts */
+ writel(readl(&core->hw->BLT_ITM0) & ~disable_itm_mask,
+ &core->hw->BLT_ITM0);
+
+ b2r2_log_info(core->dev, "Returns process_events\n");
+}
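+
+/*
+ * The decoding in process_events() assumes the BLT_ITS bit layout:
+ * bits 0-3 CQ1, bits 4-7 CQ2, bits 12-15 AQ1, bits 16-19 AQ2,
+ * bits 20-23 AQ3 and bits 24-27 AQ4.
+ */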
+
+/**
+ * b2r2_irq_handler() - B2R2 interrupt handler
+ *
+ * @irq: Interrupt number (not used)
+ * @dev_id: A pointer to the b2r2 core entity
+ */
+static irqreturn_t b2r2_irq_handler(int irq, void *dev_id)
+{
+ unsigned long flags;
+	struct b2r2_core *core = dev_id;
+
+	/* A spin lock is needed in the irq handler (SMP) */
+	spin_lock_irqsave(&core->lock, flags);
+
+ /* Make a quick exit if this device was not interrupting */
+ if (!core->valid ||
+ ((readl(&core->hw->BLT_ITS) & B2R2_ITS_MASK) == 0)) {
+ core->stat_n_irq_skipped++;
+ spin_unlock_irqrestore(&core->lock, flags);
+ return IRQ_NONE;
+ }
+
+ /* Remember time for last irq (for timeout mgmt) */
+ core->jiffies_last_irq = jiffies;
+ core->stat_n_irq++;
+
+ /* Handle the interrupt(s) */
+ process_events(core, readl(&core->hw->BLT_ITS));
+
+ /* Check if we can dispatch new jobs */
+ check_prio_list(core, true);
+
+ core->stat_n_irq_exit++;
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * struct debugfs_reg - Represents one B2R2 register in debugfs
+ *
+ * @name: Register name
+ * @offset: Byte offset in B2R2 for register
+ */
+struct debugfs_reg {
+ const char name[30];
+ u32 offset;
+};
+
+/**
+ * debugfs_regs - Array of B2R2 debugfs registers
+ */
+static const struct debugfs_reg debugfs_regs[] = {
+ {"BLT_SSBA17", offsetof(struct b2r2_memory_map, BLT_SSBA17)},
+ {"BLT_SSBA18", offsetof(struct b2r2_memory_map, BLT_SSBA18)},
+ {"BLT_SSBA19", offsetof(struct b2r2_memory_map, BLT_SSBA19)},
+ {"BLT_SSBA20", offsetof(struct b2r2_memory_map, BLT_SSBA20)},
+ {"BLT_SSBA21", offsetof(struct b2r2_memory_map, BLT_SSBA21)},
+ {"BLT_SSBA22", offsetof(struct b2r2_memory_map, BLT_SSBA22)},
+ {"BLT_SSBA23", offsetof(struct b2r2_memory_map, BLT_SSBA23)},
+ {"BLT_SSBA24", offsetof(struct b2r2_memory_map, BLT_SSBA24)},
+ {"BLT_STBA5", offsetof(struct b2r2_memory_map, BLT_STBA5)},
+ {"BLT_STBA6", offsetof(struct b2r2_memory_map, BLT_STBA6)},
+ {"BLT_STBA7", offsetof(struct b2r2_memory_map, BLT_STBA7)},
+ {"BLT_STBA8", offsetof(struct b2r2_memory_map, BLT_STBA8)},
+ {"BLT_CTL", offsetof(struct b2r2_memory_map, BLT_CTL)},
+ {"BLT_ITS", offsetof(struct b2r2_memory_map, BLT_ITS)},
+ {"BLT_STA1", offsetof(struct b2r2_memory_map, BLT_STA1)},
+ {"BLT_SSBA1", offsetof(struct b2r2_memory_map, BLT_SSBA1)},
+ {"BLT_SSBA2", offsetof(struct b2r2_memory_map, BLT_SSBA2)},
+ {"BLT_SSBA3", offsetof(struct b2r2_memory_map, BLT_SSBA3)},
+ {"BLT_SSBA4", offsetof(struct b2r2_memory_map, BLT_SSBA4)},
+ {"BLT_SSBA5", offsetof(struct b2r2_memory_map, BLT_SSBA5)},
+ {"BLT_SSBA6", offsetof(struct b2r2_memory_map, BLT_SSBA6)},
+ {"BLT_SSBA7", offsetof(struct b2r2_memory_map, BLT_SSBA7)},
+ {"BLT_SSBA8", offsetof(struct b2r2_memory_map, BLT_SSBA8)},
+ {"BLT_STBA1", offsetof(struct b2r2_memory_map, BLT_STBA1)},
+ {"BLT_STBA2", offsetof(struct b2r2_memory_map, BLT_STBA2)},
+ {"BLT_STBA3", offsetof(struct b2r2_memory_map, BLT_STBA3)},
+ {"BLT_STBA4", offsetof(struct b2r2_memory_map, BLT_STBA4)},
+ {"BLT_CQ1_TRIG_IP", offsetof(struct b2r2_memory_map, BLT_CQ1_TRIG_IP)},
+ {"BLT_CQ1_TRIG_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ1_TRIG_CTL)},
+ {"BLT_CQ1_PACE_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ1_PACE_CTL)},
+ {"BLT_CQ1_IP", offsetof(struct b2r2_memory_map, BLT_CQ1_IP)},
+ {"BLT_CQ2_TRIG_IP", offsetof(struct b2r2_memory_map, BLT_CQ2_TRIG_IP)},
+ {"BLT_CQ2_TRIG_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ2_TRIG_CTL)},
+ {"BLT_CQ2_PACE_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ2_PACE_CTL)},
+ {"BLT_CQ2_IP", offsetof(struct b2r2_memory_map, BLT_CQ2_IP)},
+ {"BLT_AQ1_CTL", offsetof(struct b2r2_memory_map, BLT_AQ1_CTL)},
+ {"BLT_AQ1_IP", offsetof(struct b2r2_memory_map, BLT_AQ1_IP)},
+ {"BLT_AQ1_LNA", offsetof(struct b2r2_memory_map, BLT_AQ1_LNA)},
+ {"BLT_AQ1_STA", offsetof(struct b2r2_memory_map, BLT_AQ1_STA)},
+ {"BLT_AQ2_CTL", offsetof(struct b2r2_memory_map, BLT_AQ2_CTL)},
+ {"BLT_AQ2_IP", offsetof(struct b2r2_memory_map, BLT_AQ2_IP)},
+ {"BLT_AQ2_LNA", offsetof(struct b2r2_memory_map, BLT_AQ2_LNA)},
+ {"BLT_AQ2_STA", offsetof(struct b2r2_memory_map, BLT_AQ2_STA)},
+ {"BLT_AQ3_CTL", offsetof(struct b2r2_memory_map, BLT_AQ3_CTL)},
+ {"BLT_AQ3_IP", offsetof(struct b2r2_memory_map, BLT_AQ3_IP)},
+ {"BLT_AQ3_LNA", offsetof(struct b2r2_memory_map, BLT_AQ3_LNA)},
+ {"BLT_AQ3_STA", offsetof(struct b2r2_memory_map, BLT_AQ3_STA)},
+ {"BLT_AQ4_CTL", offsetof(struct b2r2_memory_map, BLT_AQ4_CTL)},
+ {"BLT_AQ4_IP", offsetof(struct b2r2_memory_map, BLT_AQ4_IP)},
+ {"BLT_AQ4_LNA", offsetof(struct b2r2_memory_map, BLT_AQ4_LNA)},
+ {"BLT_AQ4_STA", offsetof(struct b2r2_memory_map, BLT_AQ4_STA)},
+ {"BLT_SSBA9", offsetof(struct b2r2_memory_map, BLT_SSBA9)},
+ {"BLT_SSBA10", offsetof(struct b2r2_memory_map, BLT_SSBA10)},
+ {"BLT_SSBA11", offsetof(struct b2r2_memory_map, BLT_SSBA11)},
+ {"BLT_SSBA12", offsetof(struct b2r2_memory_map, BLT_SSBA12)},
+ {"BLT_SSBA13", offsetof(struct b2r2_memory_map, BLT_SSBA13)},
+ {"BLT_SSBA14", offsetof(struct b2r2_memory_map, BLT_SSBA14)},
+ {"BLT_SSBA15", offsetof(struct b2r2_memory_map, BLT_SSBA15)},
+ {"BLT_SSBA16", offsetof(struct b2r2_memory_map, BLT_SSBA16)},
+ {"BLT_SGA1", offsetof(struct b2r2_memory_map, BLT_SGA1)},
+ {"BLT_SGA2", offsetof(struct b2r2_memory_map, BLT_SGA2)},
+ {"BLT_ITM0", offsetof(struct b2r2_memory_map, BLT_ITM0)},
+ {"BLT_ITM1", offsetof(struct b2r2_memory_map, BLT_ITM1)},
+ {"BLT_ITM2", offsetof(struct b2r2_memory_map, BLT_ITM2)},
+ {"BLT_ITM3", offsetof(struct b2r2_memory_map, BLT_ITM3)},
+ {"BLT_DFV2", offsetof(struct b2r2_memory_map, BLT_DFV2)},
+ {"BLT_DFV1", offsetof(struct b2r2_memory_map, BLT_DFV1)},
+ {"BLT_PRI", offsetof(struct b2r2_memory_map, BLT_PRI)},
+ {"PLUGS1_OP2", offsetof(struct b2r2_memory_map, PLUGS1_OP2)},
+ {"PLUGS1_CHZ", offsetof(struct b2r2_memory_map, PLUGS1_CHZ)},
+ {"PLUGS1_MSZ", offsetof(struct b2r2_memory_map, PLUGS1_MSZ)},
+ {"PLUGS1_PGZ", offsetof(struct b2r2_memory_map, PLUGS1_PGZ)},
+ {"PLUGS2_OP2", offsetof(struct b2r2_memory_map, PLUGS2_OP2)},
+ {"PLUGS2_CHZ", offsetof(struct b2r2_memory_map, PLUGS2_CHZ)},
+ {"PLUGS2_MSZ", offsetof(struct b2r2_memory_map, PLUGS2_MSZ)},
+ {"PLUGS2_PGZ", offsetof(struct b2r2_memory_map, PLUGS2_PGZ)},
+ {"PLUGS3_OP2", offsetof(struct b2r2_memory_map, PLUGS3_OP2)},
+ {"PLUGS3_CHZ", offsetof(struct b2r2_memory_map, PLUGS3_CHZ)},
+ {"PLUGS3_MSZ", offsetof(struct b2r2_memory_map, PLUGS3_MSZ)},
+ {"PLUGS3_PGZ", offsetof(struct b2r2_memory_map, PLUGS3_PGZ)},
+ {"PLUGT_OP2", offsetof(struct b2r2_memory_map, PLUGT_OP2)},
+ {"PLUGT_CHZ", offsetof(struct b2r2_memory_map, PLUGT_CHZ)},
+ {"PLUGT_MSZ", offsetof(struct b2r2_memory_map, PLUGT_MSZ)},
+ {"PLUGT_PGZ", offsetof(struct b2r2_memory_map, PLUGT_PGZ)},
+ {"BLT_NIP", offsetof(struct b2r2_memory_map, BLT_NIP)},
+ {"BLT_CIC", offsetof(struct b2r2_memory_map, BLT_CIC)},
+ {"BLT_INS", offsetof(struct b2r2_memory_map, BLT_INS)},
+ {"BLT_ACK", offsetof(struct b2r2_memory_map, BLT_ACK)},
+ {"BLT_TBA", offsetof(struct b2r2_memory_map, BLT_TBA)},
+ {"BLT_TTY", offsetof(struct b2r2_memory_map, BLT_TTY)},
+ {"BLT_TXY", offsetof(struct b2r2_memory_map, BLT_TXY)},
+ {"BLT_TSZ", offsetof(struct b2r2_memory_map, BLT_TSZ)},
+ {"BLT_S1CF", offsetof(struct b2r2_memory_map, BLT_S1CF)},
+ {"BLT_S2CF", offsetof(struct b2r2_memory_map, BLT_S2CF)},
+ {"BLT_S1BA", offsetof(struct b2r2_memory_map, BLT_S1BA)},
+ {"BLT_S1TY", offsetof(struct b2r2_memory_map, BLT_S1TY)},
+ {"BLT_S1XY", offsetof(struct b2r2_memory_map, BLT_S1XY)},
+ {"BLT_S2BA", offsetof(struct b2r2_memory_map, BLT_S2BA)},
+ {"BLT_S2TY", offsetof(struct b2r2_memory_map, BLT_S2TY)},
+ {"BLT_S2XY", offsetof(struct b2r2_memory_map, BLT_S2XY)},
+ {"BLT_S2SZ", offsetof(struct b2r2_memory_map, BLT_S2SZ)},
+ {"BLT_S3BA", offsetof(struct b2r2_memory_map, BLT_S3BA)},
+ {"BLT_S3TY", offsetof(struct b2r2_memory_map, BLT_S3TY)},
+ {"BLT_S3XY", offsetof(struct b2r2_memory_map, BLT_S3XY)},
+ {"BLT_S3SZ", offsetof(struct b2r2_memory_map, BLT_S3SZ)},
+ {"BLT_CWO", offsetof(struct b2r2_memory_map, BLT_CWO)},
+ {"BLT_CWS", offsetof(struct b2r2_memory_map, BLT_CWS)},
+ {"BLT_CCO", offsetof(struct b2r2_memory_map, BLT_CCO)},
+ {"BLT_CML", offsetof(struct b2r2_memory_map, BLT_CML)},
+ {"BLT_FCTL", offsetof(struct b2r2_memory_map, BLT_FCTL)},
+ {"BLT_PMK", offsetof(struct b2r2_memory_map, BLT_PMK)},
+ {"BLT_RSF", offsetof(struct b2r2_memory_map, BLT_RSF)},
+ {"BLT_RZI", offsetof(struct b2r2_memory_map, BLT_RZI)},
+ {"BLT_HFP", offsetof(struct b2r2_memory_map, BLT_HFP)},
+ {"BLT_VFP", offsetof(struct b2r2_memory_map, BLT_VFP)},
+ {"BLT_Y_RSF", offsetof(struct b2r2_memory_map, BLT_Y_RSF)},
+ {"BLT_Y_RZI", offsetof(struct b2r2_memory_map, BLT_Y_RZI)},
+ {"BLT_Y_HFP", offsetof(struct b2r2_memory_map, BLT_Y_HFP)},
+ {"BLT_Y_VFP", offsetof(struct b2r2_memory_map, BLT_Y_VFP)},
+ {"BLT_KEY1", offsetof(struct b2r2_memory_map, BLT_KEY1)},
+ {"BLT_KEY2", offsetof(struct b2r2_memory_map, BLT_KEY2)},
+ {"BLT_SAR", offsetof(struct b2r2_memory_map, BLT_SAR)},
+ {"BLT_USR", offsetof(struct b2r2_memory_map, BLT_USR)},
+ {"BLT_IVMX0", offsetof(struct b2r2_memory_map, BLT_IVMX0)},
+ {"BLT_IVMX1", offsetof(struct b2r2_memory_map, BLT_IVMX1)},
+ {"BLT_IVMX2", offsetof(struct b2r2_memory_map, BLT_IVMX2)},
+ {"BLT_IVMX3", offsetof(struct b2r2_memory_map, BLT_IVMX3)},
+ {"BLT_OVMX0", offsetof(struct b2r2_memory_map, BLT_OVMX0)},
+ {"BLT_OVMX1", offsetof(struct b2r2_memory_map, BLT_OVMX1)},
+ {"BLT_OVMX2", offsetof(struct b2r2_memory_map, BLT_OVMX2)},
+ {"BLT_OVMX3", offsetof(struct b2r2_memory_map, BLT_OVMX3)},
+ {"BLT_VC1R", offsetof(struct b2r2_memory_map, BLT_VC1R)},
+ {"BLT_Y_HFC0", offsetof(struct b2r2_memory_map, BLT_Y_HFC0)},
+ {"BLT_Y_HFC1", offsetof(struct b2r2_memory_map, BLT_Y_HFC1)},
+ {"BLT_Y_HFC2", offsetof(struct b2r2_memory_map, BLT_Y_HFC2)},
+ {"BLT_Y_HFC3", offsetof(struct b2r2_memory_map, BLT_Y_HFC3)},
+ {"BLT_Y_HFC4", offsetof(struct b2r2_memory_map, BLT_Y_HFC4)},
+ {"BLT_Y_HFC5", offsetof(struct b2r2_memory_map, BLT_Y_HFC5)},
+ {"BLT_Y_HFC6", offsetof(struct b2r2_memory_map, BLT_Y_HFC6)},
+ {"BLT_Y_HFC7", offsetof(struct b2r2_memory_map, BLT_Y_HFC7)},
+ {"BLT_Y_HFC8", offsetof(struct b2r2_memory_map, BLT_Y_HFC8)},
+ {"BLT_Y_HFC9", offsetof(struct b2r2_memory_map, BLT_Y_HFC9)},
+ {"BLT_Y_HFC10", offsetof(struct b2r2_memory_map, BLT_Y_HFC10)},
+ {"BLT_Y_HFC11", offsetof(struct b2r2_memory_map, BLT_Y_HFC11)},
+ {"BLT_Y_HFC12", offsetof(struct b2r2_memory_map, BLT_Y_HFC12)},
+ {"BLT_Y_HFC13", offsetof(struct b2r2_memory_map, BLT_Y_HFC13)},
+ {"BLT_Y_HFC14", offsetof(struct b2r2_memory_map, BLT_Y_HFC14)},
+ {"BLT_Y_HFC15", offsetof(struct b2r2_memory_map, BLT_Y_HFC15)},
+ {"BLT_Y_VFC0", offsetof(struct b2r2_memory_map, BLT_Y_VFC0)},
+ {"BLT_Y_VFC1", offsetof(struct b2r2_memory_map, BLT_Y_VFC1)},
+ {"BLT_Y_VFC2", offsetof(struct b2r2_memory_map, BLT_Y_VFC2)},
+ {"BLT_Y_VFC3", offsetof(struct b2r2_memory_map, BLT_Y_VFC3)},
+ {"BLT_Y_VFC4", offsetof(struct b2r2_memory_map, BLT_Y_VFC4)},
+ {"BLT_Y_VFC5", offsetof(struct b2r2_memory_map, BLT_Y_VFC5)},
+ {"BLT_Y_VFC6", offsetof(struct b2r2_memory_map, BLT_Y_VFC6)},
+ {"BLT_Y_VFC7", offsetof(struct b2r2_memory_map, BLT_Y_VFC7)},
+ {"BLT_Y_VFC8", offsetof(struct b2r2_memory_map, BLT_Y_VFC8)},
+ {"BLT_Y_VFC9", offsetof(struct b2r2_memory_map, BLT_Y_VFC9)},
+ {"BLT_HFC0", offsetof(struct b2r2_memory_map, BLT_HFC0)},
+ {"BLT_HFC1", offsetof(struct b2r2_memory_map, BLT_HFC1)},
+ {"BLT_HFC2", offsetof(struct b2r2_memory_map, BLT_HFC2)},
+ {"BLT_HFC3", offsetof(struct b2r2_memory_map, BLT_HFC3)},
+ {"BLT_HFC4", offsetof(struct b2r2_memory_map, BLT_HFC4)},
+ {"BLT_HFC5", offsetof(struct b2r2_memory_map, BLT_HFC5)},
+ {"BLT_HFC6", offsetof(struct b2r2_memory_map, BLT_HFC6)},
+ {"BLT_HFC7", offsetof(struct b2r2_memory_map, BLT_HFC7)},
+ {"BLT_HFC8", offsetof(struct b2r2_memory_map, BLT_HFC8)},
+ {"BLT_HFC9", offsetof(struct b2r2_memory_map, BLT_HFC9)},
+ {"BLT_HFC10", offsetof(struct b2r2_memory_map, BLT_HFC10)},
+ {"BLT_HFC11", offsetof(struct b2r2_memory_map, BLT_HFC11)},
+ {"BLT_HFC12", offsetof(struct b2r2_memory_map, BLT_HFC12)},
+ {"BLT_HFC13", offsetof(struct b2r2_memory_map, BLT_HFC13)},
+ {"BLT_HFC14", offsetof(struct b2r2_memory_map, BLT_HFC14)},
+ {"BLT_HFC15", offsetof(struct b2r2_memory_map, BLT_HFC15)},
+ {"BLT_VFC0", offsetof(struct b2r2_memory_map, BLT_VFC0)},
+ {"BLT_VFC1", offsetof(struct b2r2_memory_map, BLT_VFC1)},
+ {"BLT_VFC2", offsetof(struct b2r2_memory_map, BLT_VFC2)},
+ {"BLT_VFC3", offsetof(struct b2r2_memory_map, BLT_VFC3)},
+ {"BLT_VFC4", offsetof(struct b2r2_memory_map, BLT_VFC4)},
+ {"BLT_VFC5", offsetof(struct b2r2_memory_map, BLT_VFC5)},
+ {"BLT_VFC6", offsetof(struct b2r2_memory_map, BLT_VFC6)},
+ {"BLT_VFC7", offsetof(struct b2r2_memory_map, BLT_VFC7)},
+ {"BLT_VFC8", offsetof(struct b2r2_memory_map, BLT_VFC8)},
+ {"BLT_VFC9", offsetof(struct b2r2_memory_map, BLT_VFC9)},
+};
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * printk_regs() - Print the B2R2 registers to the kernel log
+ *
+ * @core: The b2r2 core entity
+ */
+static void printk_regs(struct b2r2_core *core)
+{
+#ifdef CONFIG_B2R2_DEBUG
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ unsigned long value = readl(
+ (unsigned long *) (((u8 *) core->hw) +
+ debugfs_regs[i].offset));
+ b2r2_log_regdump(core->dev, "%s: %08lX\n",
+ debugfs_regs[i].name,
+ value);
+ }
+#endif
+}
+#endif
+
+/**
+ * debugfs_b2r2_reg_read() - Implements debugfs read for B2R2 register
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_reg_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size;
+ int ret = 0;
+ unsigned long value;
+	char *tmpbuf = kmalloc(4096, GFP_KERNEL);
+
+ if (tmpbuf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Read from B2R2 */
+ value = readl((unsigned long *)
+ filp->f_dentry->d_inode->i_private);
+
+ /* Build the string */
+ dev_size = sprintf(tmpbuf, "%8lX\n", value);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+	/* Return it to user space */
+	if (copy_to_user(buf, tmpbuf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+	kfree(tmpbuf);
+	return ret;
+}
+
+/**
+ * debugfs_b2r2_reg_write() - Implements debugfs write for B2R2 register
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to write
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int debugfs_b2r2_reg_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+	char tmpbuf[80];
+	unsigned long value;
+	int ret = 0;
+
+	/* Adjust count */
+	if (count >= sizeof(tmpbuf))
+		count = sizeof(tmpbuf) - 1;
+	/* Get it from user space */
+	if (copy_from_user(tmpbuf, buf, count))
+		return -EFAULT;
+	tmpbuf[count] = 0;
+	/* Convert from a hex string; scan into an unsigned long so the
+	   %lX conversion matches the argument on all architectures */
+	if (sscanf(tmpbuf, "%8lX", &value) != 1)
+		return -EINVAL;
+
+	writel((u32)value, (u32 *)
+		filp->f_dentry->d_inode->i_private);
+
+ *f_pos += count;
+ ret = count;
+
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_reg_fops() - File operations for B2R2 register debugfs
+ */
+static const struct file_operations debugfs_b2r2_reg_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_reg_read,
+ .write = debugfs_b2r2_reg_write,
+};
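+
+/*
+ * Example shell usage for the per-register files (hypothetical paths,
+ * assuming debugfs is mounted at /sys/kernel/debug and the device is
+ * named "b2r2"):
+ *
+ *	cat /sys/kernel/debug/b2r2/core/regs/BLT_CTL
+ *	echo 80000000 > /sys/kernel/debug/b2r2/core/regs/BLT_CTL
+ */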
+
+/**
+ * debugfs_b2r2_regs_read() - Implements debugfs read for B2R2 register dump
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int debugfs_b2r2_regs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ int i;
+	char *tmpbuf = kmalloc(4096, GFP_KERNEL);
+
+ if (tmpbuf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Build a giant string containing all registers */
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ unsigned long value =
+ readl((u32 *) (((u8 *)
+ filp->f_dentry->d_inode->i_private) +
+ debugfs_regs[i].offset));
+ dev_size += sprintf(tmpbuf + dev_size, "%s: %08lX\n",
+ debugfs_regs[i].name,
+ value);
+ }
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+	if (copy_to_user(buf, tmpbuf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+	kfree(tmpbuf);
+	return ret;
+}
+
+/**
+ * debugfs_b2r2_regs_fops() - File operations for B2R2 register dump debugfs
+ */
+static const struct file_operations debugfs_b2r2_regs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_regs_read,
+};
+
+/**
+ * debugfs_b2r2_stat_read() - Implements debugfs read for B2R2 statistics
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_stat_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ int i = 0;
+	char *tmpbuf = kmalloc(4096, GFP_KERNEL);
+ struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
+
+ if (tmpbuf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Build a string containing all statistics */
+ dev_size += sprintf(tmpbuf + dev_size, "Interrupts : %lu\n",
+ core->stat_n_irq);
+ dev_size += sprintf(tmpbuf + dev_size, "Added jobs : %lu\n",
+ core->stat_n_jobs_added);
+ dev_size += sprintf(tmpbuf + dev_size, "Removed jobs : %lu\n",
+ core->stat_n_jobs_removed);
+ dev_size += sprintf(tmpbuf + dev_size, "Jobs in prio list : %lu\n",
+ core->stat_n_jobs_in_prio_list);
+ dev_size += sprintf(tmpbuf + dev_size, "Active jobs : %lu\n",
+ core->n_active_jobs);
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++)
+ dev_size += sprintf(tmpbuf + dev_size,
+ " Job in queue %d : 0x%08lx\n",
+ i, (unsigned long) core->active_jobs[i]);
+ dev_size += sprintf(tmpbuf + dev_size, "Clock requests : %lu\n",
+ core->clock_request_count);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+	if (copy_to_user(buf, tmpbuf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+	kfree(tmpbuf);
+	return ret;
+}
+
+/**
+ * debugfs_b2r2_stat_fops() - File operations for B2R2 statistics debugfs
+ */
+static const struct file_operations debugfs_b2r2_stat_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_stat_read,
+};
+
+
+/**
+ * debugfs_b2r2_clock_read() - Implements debugfs read for
+ * PMU B2R2 clock register
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_clock_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ /* 10 characters hex number + newline + string terminator; */
+ char tmpbuf[10+2];
+ size_t dev_size;
+ int ret = 0;
+ struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
+
+ unsigned long value = clk_get_rate(core->b2r2_clock);
+
+ dev_size = sprintf(tmpbuf, "%#010lx\n", value);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+	if (copy_to_user(buf, tmpbuf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_clock_write() - Implements debugfs write for
+ * PMU B2R2 clock register
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to write
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int debugfs_b2r2_clock_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+	char tmpbuf[80];
+	unsigned long rate;
+	int ret = 0;
+
+	if (count >= sizeof(tmpbuf))
+		count = sizeof(tmpbuf) - 1;
+	if (copy_from_user(tmpbuf, buf, count))
+		return -EFAULT;
+	tmpbuf[count] = 0;
+	if (sscanf(tmpbuf, "%8lX", &rate) != 1)
+		return -EINVAL;
+
+	/* Setting the rate is not working yet: */
+	/* clk_set_rate(core->b2r2_clock, rate); */
+
+ *f_pos += count;
+ ret = count;
+
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_clock_fops() - File operations for PMU B2R2 clock debugfs
+ */
+static const struct file_operations debugfs_b2r2_clock_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_clock_read,
+ .write = debugfs_b2r2_clock_write,
+};
+
+/**
+ * debugfs_b2r2_enabled_read() - Implements debugfs read for
+ * B2R2 Core Enable/Disable
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_enabled_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+	/* 2 character hex number + newline + string terminator */
+	char tmpbuf[4+2];
+ size_t dev_size;
+ int ret = 0;
+ struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
+
+ dev_size = sprintf(tmpbuf, "%02X\n", core->control->enabled);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+	if (copy_to_user(buf, tmpbuf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+out:
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_enabled_write() - Implements debugfs write for
+ * B2R2 Core Enable/Disable
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to write
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int debugfs_b2r2_enabled_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ char tmpbuf[80];
+ unsigned int enable;
+ int ret = 0;
+ struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
+
+	if (count >= sizeof(tmpbuf))
+		count = sizeof(tmpbuf) - 1;
+	if (copy_from_user(tmpbuf, buf, count))
+		return -EFAULT;
+	tmpbuf[count] = 0;
+	if (sscanf(tmpbuf, "%02X", &enable) != 1)
+		return -EINVAL;
+
+	core->control->enabled = (enable != 0);
+
+ *f_pos += count;
+ ret = count;
+
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_enabled_fops() - File operations for B2R2 Core Enable/Disable debugfs
+ */
+static const struct file_operations debugfs_b2r2_enabled_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_enabled_read,
+ .write = debugfs_b2r2_enabled_write,
+};
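+
+/*
+ * Example shell usage for the core control files (hypothetical paths,
+ * assuming debugfs is mounted at /sys/kernel/debug and the device is
+ * named "b2r2"):
+ *
+ *	cat /sys/kernel/debug/b2r2/core/stats
+ *	cat /sys/kernel/debug/b2r2/core/clock
+ *	echo 0 > /sys/kernel/debug/b2r2/core/enabled
+ */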
+
+#endif
+
+/**
+ * init_hw() - B2R2 hardware reset & initialization
+ *
+ * @core: The b2r2 core entity
+ *
+ * 1) Registers the interrupt handler.
+ * 2) Resets the B2R2 hardware: writes B2R2BLT_CTLGLOBAL_soft_reset to
+ *    the B2R2 control register, then polls the B2R2 status register
+ *    for the B2R2BLT_STA1BDISP_IDLE flag.
+ * 3) Waits for the B2R2 hardware to go idle (bounded poll rather than
+ *    an endless while loop).
+ * 4) Registers debugfs entries for B2R2 register access.
+ * 5) Recovers from any error without leaks.
+ */
+static int init_hw(struct b2r2_core *core)
+{
+ int result = 0;
+ u32 uTimeOut = B2R2_RESET_TIMEOUT_VALUE;
+
+ /* Put B2R2 into reset */
+ clear_interrupts(core);
+
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
+
+ /* Set up interrupt handler */
+ result = request_irq(core->irq, b2r2_irq_handler, IRQF_SHARED,
+ "b2r2-interrupt", core);
+ if (result) {
+ b2r2_log_err(core->dev,
+ "%s: failed to register IRQ for B2R2\n", __func__);
+ goto b2r2_init_request_irq_failed;
+ }
+
+ b2r2_log_info(core->dev, "do a global reset..\n");
+
+ /* Release reset */
+ writel(0x00000000, &core->hw->BLT_CTL);
+
+ b2r2_log_info(core->dev, "wait for B2R2 to be idle..\n");
+
+	/* Wait for B2R2 to go idle (bounded poll rather than an endless loop) */
+ while ((uTimeOut > 0) &&
+ ((readl(&core->hw->BLT_STA1) &
+ B2R2BLT_STA1BDISP_IDLE) == 0x0))
+ uTimeOut--;
+ if (uTimeOut == 0) {
+ b2r2_log_err(core->dev,
+ "%s: B2R2 not idle after SW reset\n", __func__);
+ result = -EAGAIN;
+ goto b2r2_core_init_hw_timeout;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debug fs files for register access */
+ if (!IS_ERR_OR_NULL(core->debugfs_core_root_dir) &&
+ IS_ERR_OR_NULL(core->debugfs_regs_dir)) {
+ core->debugfs_regs_dir = debugfs_create_dir("regs",
+ core->debugfs_core_root_dir);
+ }
+ if (!IS_ERR_OR_NULL(core->debugfs_regs_dir)) {
+ int i;
+ debugfs_create_file("all", 0666, core->debugfs_regs_dir,
+ (void *)core->hw, &debugfs_b2r2_regs_fops);
+ /* Create debugfs entries for all static registers */
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++)
+ debugfs_create_file(debugfs_regs[i].name, 0666,
+ core->debugfs_regs_dir,
+ (void *)(((u8 *) core->hw) +
+ debugfs_regs[i].offset),
+ &debugfs_b2r2_reg_fops);
+ }
+#endif
+
+ b2r2_log_info(core->dev, "%s ended..\n", __func__);
+ return result;
+
+/* Recover from any error without leaks */
+b2r2_core_init_hw_timeout:
+	/* Free the B2R2 interrupt handler */
+ free_irq(core->irq, core);
+
+b2r2_init_request_irq_failed:
+ if (core->hw)
+ iounmap(core->hw);
+ core->hw = NULL;
+
+ return result;
+}
+
+
+/**
+ * exit_hw() - B2R2 hardware exit
+ *
+ * @core: The b2r2 core entity
+ *
+ * core->lock must _not_ be held
+ */
+static void exit_hw(struct b2r2_core *core)
+{
+ unsigned long flags;
+
+ b2r2_log_info(core->dev, "%s started..\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Unregister our debugfs entries */
+ if (!IS_ERR_OR_NULL(core->debugfs_regs_dir)) {
+ debugfs_remove_recursive(core->debugfs_regs_dir);
+ core->debugfs_regs_dir = NULL;
+ }
+#endif
+ b2r2_log_debug(core->dev, "%s: locking core->lock\n", __func__);
+ spin_lock_irqsave(&core->lock, flags);
+
+ /* Cancel all pending jobs */
+ b2r2_log_debug(core->dev, "%s: canceling pending jobs\n", __func__);
+ exit_job_list(core, &core->prio_queue);
+
+ /* Soft reset B2R2 (Close all DMA,
+ reset all state to idle, reset regs)*/
+ b2r2_log_debug(core->dev, "%s: putting b2r2 in reset\n", __func__);
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
+
+ b2r2_log_debug(core->dev, "%s: clearing interrupts\n", __func__);
+ clear_interrupts(core);
+
+	/* Free the B2R2 interrupt handler */
+ b2r2_log_debug(core->dev, "%s: freeing interrupt handler\n", __func__);
+ free_irq(core->irq, core);
+
+ b2r2_log_debug(core->dev, "%s: unlocking core->lock\n", __func__);
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ b2r2_log_info(core->dev, "%s ended...\n", __func__);
+}
+
+/**
+ * b2r2_probe() - This routine loads the B2R2 core driver
+ *
+ * @pdev: platform device.
+ */
+static int b2r2_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res = NULL;
+ struct b2r2_core *core = NULL;
+ struct b2r2_control *control = NULL;
+ struct b2r2_platform_data *pdata = NULL;
+ int debug_init = 0;
+
+ BUG_ON(pdev == NULL);
+ BUG_ON(pdev->id < 0 || pdev->id >= B2R2_MAX_NBR_DEVICES);
+
+ pdata = pdev->dev.platform_data;
+
+ core = kzalloc(sizeof(*core), GFP_KERNEL);
+	if (!core) {
+		dev_err(&pdev->dev, "b2r2 core alloc failed\n");
+		/* Nothing allocated yet, safe to return directly */
+		return -ENOMEM;
+	}
+
+ core->dev = &pdev->dev;
+ dev_set_drvdata(core->dev, core);
+ if (pdev->id)
+ snprintf(core->name, sizeof(core->name), "b2r2_%d", pdev->id);
+ else
+ snprintf(core->name, sizeof(core->name), "b2r2");
+
+ dev_info(&pdev->dev, "init started.\n");
+
+ /* Init spin locks */
+ spin_lock_init(&core->lock);
+
+ /* Init job queues */
+ INIT_LIST_HEAD(&core->prio_queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ /* Create work queue for callbacks & timeout */
+ INIT_DELAYED_WORK(&core->timeout_work, timeout_work_function);
+#endif
+
+ /* Work queue for callbacks and timeout management */
+ core->work_queue = create_workqueue("B2R2");
+ if (!core->work_queue) {
+ ret = -ENOMEM;
+ goto error_exit;
+ }
+
+ /* Get the clock for B2R2 */
+ core->b2r2_clock = clk_get(core->dev, pdata->clock_id);
+ if (IS_ERR(core->b2r2_clock)) {
+ ret = PTR_ERR(core->b2r2_clock);
+ dev_err(&pdev->dev, "clk_get %s failed\n", pdata->clock_id);
+ goto error_exit;
+ }
+
+ /* Get the B2R2 regulator */
+ core->b2r2_reg = regulator_get(core->dev, pdata->regulator_id);
+ if (IS_ERR(core->b2r2_reg)) {
+ ret = PTR_ERR(core->b2r2_reg);
+ dev_err(&pdev->dev, "regulator_get %s failed "
+ "(dev_name=%s)\n", pdata->regulator_id,
+ dev_name(core->dev));
+ goto error_exit;
+ }
+
+ /* Init power management */
+ mutex_init(&core->domain_lock);
+ INIT_DELAYED_WORK_DEFERRABLE(&core->domain_disable_work,
+ domain_disable_work_function);
+ core->domain_enabled = false;
+ core->valid = false;
+
+ /* Map B2R2 into kernel virtual memory space */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		ret = -ENODEV;
+		goto error_exit;
+	}
+
+ /* Hook up irq */
+	core->irq = platform_get_irq(pdev, 0);
+	if (core->irq <= 0) {
+		dev_err(&pdev->dev, "%s: Failed to request irq (irq=%d)\n",
+			__func__, core->irq);
+		ret = core->irq < 0 ? core->irq : -EINVAL;
+		goto error_exit;
+	}
+
+	core->hw = (struct b2r2_memory_map *) ioremap(res->start,
+			resource_size(res));
+ if (core->hw == NULL) {
+ dev_err(&pdev->dev, "%s: ioremap failed\n", __func__);
+ ret = -ENOMEM;
+ goto error_exit;
+ }
+
+ dev_dbg(core->dev, "b2r2 structure address %p\n", core->hw);
+
+ control = kzalloc(sizeof(*control), GFP_KERNEL);
+	if (!control) {
+		dev_err(&pdev->dev, "b2r2 control alloc failed\n");
+		ret = -ENOMEM;
+		goto error_exit;
+	}
+
+ control->data = (void *)core;
+ control->id = pdev->id;
+ control->dev = &pdev->dev; /* Temporary device */
+
+ core->op_size = B2R2_PLUG_OPCODE_SIZE_DEFAULT;
+ core->ch_size = B2R2_PLUG_CHUNK_SIZE_DEFAULT;
+ core->pg_size = B2R2_PLUG_PAGE_SIZE_DEFAULT;
+ core->mg_size = B2R2_PLUG_MESSAGE_SIZE_DEFAULT;
+ core->min_req_time = 0;
+
+#ifdef CONFIG_DEBUG_FS
+ core->debugfs_root_dir = debugfs_create_dir(core->name, NULL);
+ if (!IS_ERR_OR_NULL(core->debugfs_root_dir)) {
+ core->debugfs_core_root_dir = debugfs_create_dir("core",
+ core->debugfs_root_dir);
+ control->debugfs_debug_root_dir = debugfs_create_dir("debug",
+ core->debugfs_root_dir);
+ control->mem_heap.debugfs_root_dir = debugfs_create_dir("mem",
+ core->debugfs_root_dir);
+ control->debugfs_root_dir = debugfs_create_dir("blt",
+ core->debugfs_root_dir);
+ }
+
+ if (!IS_ERR_OR_NULL(core->debugfs_core_root_dir)) {
+ debugfs_create_file("stats", 0666, core->debugfs_core_root_dir,
+ core, &debugfs_b2r2_stat_fops);
+ debugfs_create_file("clock", 0666, core->debugfs_core_root_dir,
+ core, &debugfs_b2r2_clock_fops);
+ debugfs_create_file("enabled", 0666,
+ core->debugfs_core_root_dir,
+ core, &debugfs_b2r2_enabled_fops);
+ debugfs_create_u8("op_size", 0666, core->debugfs_core_root_dir,
+ &core->op_size);
+ debugfs_create_u8("ch_size", 0666, core->debugfs_core_root_dir,
+ &core->ch_size);
+ debugfs_create_u8("pg_size", 0666, core->debugfs_core_root_dir,
+ &core->pg_size);
+ debugfs_create_u8("mg_size", 0666, core->debugfs_core_root_dir,
+ &core->mg_size);
+ debugfs_create_u16("min_req_time", 0666,
+ core->debugfs_core_root_dir, &core->min_req_time);
+ }
+#endif
+
+ ret = b2r2_debug_init(control);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "b2r2_debug_init failed\n");
+ goto error_exit;
+ }
+ debug_init = 1;
+
+ /* Initialize b2r2_control */
+ ret = b2r2_control_init(control);
+ if (ret < 0) {
+ b2r2_log_err(&pdev->dev, "b2r2_control_init failed\n");
+ goto error_exit;
+ }
+ core->control = control;
+
+ /* Add the control to the blitter */
+ kref_init(&control->ref);
+ control->enabled = true;
+ b2r2_blt_add_control(control);
+
+ b2r2_core[pdev->id] = core;
+ dev_info(&pdev->dev, "%s done.\n", __func__);
+
+ return ret;
+
+/* Clean up on any error */
+error_exit:
+ kfree(control);
+
+ if (!IS_ERR_OR_NULL(core->b2r2_reg))
+ regulator_put(core->b2r2_reg);
+
+ if (!IS_ERR_OR_NULL(core->b2r2_clock))
+ clk_put(core->b2r2_clock);
+
+ if (!IS_ERR_OR_NULL(core->work_queue))
+ destroy_workqueue(core->work_queue);
+
+ if (debug_init)
+ b2r2_debug_exit();
+
+#ifdef CONFIG_DEBUG_FS
+ if (!IS_ERR_OR_NULL(core->debugfs_root_dir)) {
+ debugfs_remove_recursive(core->debugfs_root_dir);
+ core->debugfs_root_dir = NULL;
+ }
+#endif
+ kfree(core);
+
+ dev_info(&pdev->dev, "%s done with errors (%d).\n", __func__, ret);
+
+ return ret;
+}
+
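+/**
+ * b2r2_core_release() - Releases the b2r2 control and core when the
+ * last reference is dropped
+ *
+ * @control_ref: The kref member of the b2r2 control entity
+ */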
+void b2r2_core_release(struct kref *control_ref)
+{
+ struct b2r2_control *control = container_of(
+ control_ref, struct b2r2_control, ref);
+ struct b2r2_core *core = control->data;
+ int id = control->id;
+ unsigned long flags;
+#ifdef CONFIG_B2R2_DEBUG
+ struct device *dev = core->dev;
+#endif
+
+ b2r2_log_info(dev, "%s: enter\n", __func__);
+
+ /* Exit b2r2 control module */
+ b2r2_control_exit(control);
+ kfree(control);
+ b2r2_debug_exit();
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ cancel_delayed_work(&core->timeout_work);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks for
+ cancelled jobs) */
+ flush_workqueue(core->work_queue);
+
+ /* Make sure the power is turned off */
+ cancel_delayed_work_sync(&core->domain_disable_work);
+
+	/* Unmap B2R2 registers */
+ b2r2_log_info(dev, "%s: unmap b2r2 registers..\n", __func__);
+ if (core->hw) {
+ iounmap(core->hw);
+ core->hw = NULL;
+ }
+
+ destroy_workqueue(core->work_queue);
+
+ spin_lock_irqsave(&core->lock, flags);
+ core->work_queue = NULL;
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ /* Return the clock */
+ clk_put(core->b2r2_clock);
+ regulator_put(core->b2r2_reg);
+
+ core->dev = NULL;
+ kfree(core);
+ b2r2_core[id] = NULL;
+
+ b2r2_log_info(dev, "%s: exit\n", __func__);
+}
+
+
+/**
+ * b2r2_remove - This routine unloads b2r2 driver
+ *
+ * @pdev: platform device.
+ */
+static int b2r2_remove(struct platform_device *pdev)
+{
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(&pdev->dev, "%s: Started\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ if (!IS_ERR_OR_NULL(core->debugfs_root_dir)) {
+ debugfs_remove_recursive(core->debugfs_root_dir);
+ core->debugfs_root_dir = NULL;
+ }
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks) */
+ flush_workqueue(core->work_queue);
+
+ /* Remove control from blitter */
+ core->control->enabled = false;
+ b2r2_blt_remove_control(core->control);
+ kref_put(&core->control->ref, b2r2_core_release);
+
+ b2r2_log_info(&pdev->dev, "%s: Ended\n", __func__);
+
+ return 0;
+}
+/**
+ * b2r2_suspend() - Puts the B2R2 device into suspend state.
+ * @pdev: platform device.
+ * @state: power management message.
+ *
+ * This routine stores the current state of the b2r2 device and puts it
+ * into suspend state.
+ */
+int b2r2_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(core->dev, "%s\n", __func__);
+
+	/* Flush B2R2 work queue (call all callbacks) */
+	flush_workqueue(core->work_queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+	cancel_delayed_work(&core->timeout_work);
+#endif
+
+	/* Flush again so callbacks run for any jobs cancelled above */
+	flush_workqueue(core->work_queue);
+
+ /* Make sure power is turned off */
+ cancel_delayed_work_sync(&core->domain_disable_work);
+
+ return 0;
+}
+
+
+/**
+ * b2r2_resume() - Resumes the B2R2 device from suspend state.
+ * @pdev: platform device.
+ *
+ * This routine restores the state of the b2r2 device on resume.
+ */
+int b2r2_resume(struct platform_device *pdev)
+{
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(core->dev, "%s\n", __func__);
+
+ return 0;
+}
+
+void b2r2_core_print_stats(struct b2r2_core *core)
+{
+ b2r2_log_info(core->dev,
+ "%s: n_irq %ld, n_irq_exit %ld, n_irq_skipped %ld,\n"
+ "n_jobs_added %ld, n_active_jobs %ld, "
+ "n_jobs_in_prio_list %ld,\n"
+ "n_jobs_removed %ld\n",
+ __func__,
+ core->stat_n_irq,
+ core->stat_n_irq_exit,
+ core->stat_n_irq_skipped,
+ core->stat_n_jobs_added,
+ core->n_active_jobs,
+ core->stat_n_jobs_in_prio_list,
+ core->stat_n_jobs_removed);
+}
+
+/**
+ * struct platform_b2r2_driver - Platform driver configuration for the
+ * B2R2 core driver
+ */
+static struct platform_driver platform_b2r2_driver = {
+ .remove = b2r2_remove,
+ .driver = {
+ .name = "b2r2",
+ },
+	/* TODO: implement power mgmt functions */
+ .suspend = b2r2_suspend,
+ .resume = b2r2_resume,
+};
+
+
+/**
+ * b2r2_init() - Module init function for the B2R2 core module
+ */
+static int __init b2r2_init(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ return platform_driver_probe(&platform_b2r2_driver, b2r2_probe);
+}
+module_init(b2r2_init);
+
+/**
+ * b2r2_exit() - Module exit function for the B2R2 core module
+ */
+static void __exit b2r2_exit(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ platform_driver_unregister(&platform_b2r2_driver);
+ return;
+}
+module_exit(b2r2_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Fekete (robert.fekete@stericsson.com)");
+MODULE_DESCRIPTION("B2R2 Core driver");
diff --git a/drivers/video/b2r2/b2r2_core.h b/drivers/video/b2r2/b2r2_core.h
new file mode 100644
index 00000000000..5b9fdcdc2bb
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_core.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 core driver
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_CORE_H__
+#define __B2R2_CORE_H__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/**
+ * B2R2_RESET_TIMEOUT_VALUE - The number of times to read the status register
+ * waiting for b2r2 to go idle after soft reset.
+ */
+#define B2R2_RESET_TIMEOUT_VALUE (1500)
+
+/**
+ * B2R2_CLK_FLAG - Value to write into clock reg to turn clock on
+ */
+#define B2R2_CLK_FLAG (0x125)
+
+/**
+ * DEBUG_CHECK_ADDREF_RELEASE - Define this to enable addref / release debug
+ */
+#define DEBUG_CHECK_ADDREF_RELEASE 1
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * HANDLE_TIMEOUTED_JOBS - Define this to detect timed-out jobs
+ * and cancel them
+ */
+#define HANDLE_TIMEOUTED_JOBS
+#define JOB_TIMEOUT (HZ/2)
+#endif
+
+/**
+ * B2R2_CLOCK_ALWAYS_ON - Define this to disable power save clock turn off
+ */
+/* #define B2R2_CLOCK_ALWAYS_ON 1 */
+
+/**
+ * START_SENTINEL - Watch guard to detect job overwrites
+ */
+#define START_SENTINEL 0xBABEDEEA
+
+/**
+ * END_SENTINEL - Watch guard to detect job overwrites
+ */
+#define END_SENTINEL 0xDADBDCDD
+
+/**
+ * B2R2_CORE_LOWEST_PRIO - Lowest prio allowed
+ */
+#define B2R2_CORE_LOWEST_PRIO -19
+/**
+ * B2R2_CORE_HIGHEST_PRIO - Highest prio allowed
+ */
+#define B2R2_CORE_HIGHEST_PRIO 20
+
+/**
+ * B2R2_DOMAIN_DISABLE_TIMEOUT - Delay before the power domain is
+ * actually disabled
+ */
+#define B2R2_DOMAIN_DISABLE_TIMEOUT (HZ/100)
+
+/**
+ * B2R2_REGULATOR_RETRY_COUNT - Number of retries for regulator operations
+ */
+#define B2R2_REGULATOR_RETRY_COUNT 10
+
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+
+/**
+ * struct addref_release - Represents one addref or release. Used
+ * to debug addref / release problems
+ *
+ * @addref: true if this represents an addref else it represents
+ * a release.
+ * @job: The job that was referenced
+ * @caller: The caller of the addref or release
+ * @ref_count: The job reference count after addref / release
+ */
+struct addref_release {
+ bool addref;
+ struct b2r2_core_job *job;
+ const char *caller;
+ int ref_count;
+};
+
+#endif
+
+/**
+ * struct b2r2_core - Administration data for B2R2 core
+ *
+ * @lock: Spin lock protecting the b2r2_core structure and the B2R2 HW
+ * @hw: B2R2 registers memory mapped
+ * @b2r2_clock: The B2R2 clock
+ * @dev: Device used for logging via dev_... functions
+ *
+ * @prio_queue: Queue of jobs sorted in priority order
+ * @active_jobs: Array containing pointer to zero or one job per queue
+ * @n_active_jobs: Number of active jobs
+ * @jiffies_last_active: jiffie value when adding last active job
+ * @jiffies_last_irq: jiffie value when the last irq occurred
+ * @timeout_work: Work structure for timeout work
+ *
+ * @next_job_id: Contains the job id that will be assigned to the next
+ * added job.
+ *
+ * @clock_request_count: When non-zero, clock is on
+ * @clock_off_timer: Kernel timer to handle delayed turn off of clock
+ *
+ * @work_queue: Work queue to handle done jobs (callbacks) and timeouts in
+ * non-interrupt context.
+ *
+ * @stat_n_irq: Number of interrupts (statistics)
+ * @stat_n_jobs_added: Number of jobs added (statistics)
+ * @stat_n_jobs_removed: Number of jobs removed (statistics)
+ * @stat_n_jobs_in_prio_list: Number of jobs in prio list (statistics)
+ *
+ * @debugfs_root_dir: Root directory for B2R2 debugfs
+ *
+ * @ar: Circular array of addref / release debug structs
+ * @ar_write: Where next write will occur
+ * @ar_read: First valid place to read. When ar_read == ar_write then
+ * the array is empty.
+ */
+struct b2r2_core {
+ spinlock_t lock;
+
+ struct b2r2_memory_map *hw;
+
+ u8 op_size;
+ u8 ch_size;
+ u8 pg_size;
+ u8 mg_size;
+ u16 min_req_time;
+ int irq;
+
+ char name[16];
+ struct device *dev;
+
+ struct list_head prio_queue;
+
+ struct b2r2_core_job *active_jobs[B2R2_CORE_QUEUE_NO_OF];
+ unsigned long n_active_jobs;
+
+ unsigned long jiffies_last_active;
+ unsigned long jiffies_last_irq;
+#ifdef HANDLE_TIMEOUTED_JOBS
+ struct delayed_work timeout_work;
+#endif
+ int next_job_id;
+
+ unsigned long clock_request_count;
+ struct timer_list clock_off_timer;
+
+ struct workqueue_struct *work_queue;
+
+ /* Statistics */
+ unsigned long stat_n_irq_exit;
+ unsigned long stat_n_irq_skipped;
+ unsigned long stat_n_irq;
+ unsigned long stat_n_jobs_added;
+ unsigned long stat_n_jobs_removed;
+
+ unsigned long stat_n_jobs_in_prio_list;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_core_root_dir;
+ struct dentry *debugfs_regs_dir;
+#endif
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ /* Tracking release bug...*/
+ struct addref_release ar[100];
+ int ar_write;
+ int ar_read;
+#endif
+
+ /* Power management variables */
+ struct mutex domain_lock;
+ struct delayed_work domain_disable_work;
+
+ /*
+ * We need to keep track of both the number of domain_enable/disable()
+ * calls and whether the power was actually turned off, since the
+ * power off is done in a delayed job.
+ */
+ bool domain_enabled;
+ volatile bool valid;
+ int domain_request_count;
+
+ struct clk *b2r2_clock;
+ struct regulator *b2r2_reg;
+
+ struct b2r2_control *control;
+};
+
+/**
+ * b2r2_core_job_add() - Adds a job to B2R2 job queues
+ *
+ * The job reference count will be increased after this function
+ * has been called and b2r2_core_job_release() must be called to
+ * release the reference. The job callback function will always
+ * be called after the job is done or cancelled.
+ *
+ * @control: The b2r2 control entity
+ * @job: Job to be added
+ *
+ * Returns 0 if OK else negative error code
+ *
+ */
+int b2r2_core_job_add(struct b2r2_control *control,
+ struct b2r2_core_job *job);
+
+/**
+ * b2r2_core_job_wait() - Waits for an added job to be done.
+ *
+ * @job: Job to wait for
+ *
+ * Returns 0 if job done else negative error code
+ *
+ */
+int b2r2_core_job_wait(struct b2r2_core_job *job);
+
+/**
+ * b2r2_core_job_cancel() - Cancel an already added job.
+ *
+ * @job: Job to cancel
+ *
+ * Returns 0 if job cancelled or done else negative error code
+ *
+ */
+int b2r2_core_job_cancel(struct b2r2_core_job *job);
+
+/**
+ * b2r2_core_job_find() - Finds job with given job id
+ *
+ * Reference count will be increased for the found job
+ *
+ * @control: The b2r2 control entity
+ * @job_id: Job id to find
+ *
+ * Returns job if found, else NULL
+ *
+ */
+struct b2r2_core_job *b2r2_core_job_find(struct b2r2_control *control,
+ int job_id);
+
+/**
+ * b2r2_core_job_find_first_with_tag() - Finds first job with given tag
+ *
+ * Reference count will be increased for the found job.
+ * This function can be used to find all jobs for a client, i.e.
+ * when cancelling all jobs for a client.
+ *
+ * @control: The b2r2 control entity
+ * @tag: Tag to find
+ *
+ * Returns job if found, else NULL
+ *
+ */
+struct b2r2_core_job *b2r2_core_job_find_first_with_tag(
+ struct b2r2_control *control, int tag);
+
+/**
+ * b2r2_core_job_addref() - Increase the job reference count.
+ *
+ * @job: Job to increase reference count for.
+ * @caller: The function calling this function (for debug)
+ */
+void b2r2_core_job_addref(struct b2r2_core_job *job, const char *caller);
+
+/**
+ * b2r2_core_job_release() - Decrease the job reference count. The
+ * job will be released (the release() function
+ * will be called) when the reference count
+ * reaches zero.
+ *
+ * @job: Job to decrease reference count for.
+ * @caller: The function calling this function (for debug)
+ */
+void b2r2_core_job_release(struct b2r2_core_job *job, const char *caller);
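+
+/*
+ * Typical call sequence for a client (a sketch for illustration only,
+ * not part of the driver):
+ *
+ *	struct b2r2_core_job *job = <allocate and set up the job>;
+ *	int ret = b2r2_core_job_add(control, job); <takes a reference>
+ *	if (ret == 0)
+ *		ret = b2r2_core_job_wait(job);
+ *	b2r2_core_job_release(job, __func__); <drops the reference>
+ */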
+
+void b2r2_core_print_stats(struct b2r2_core *core);
+
+void b2r2_core_release(struct kref *control_ref);
+
+#endif /* __B2R2_CORE_H__ */
diff --git a/drivers/video/b2r2/b2r2_debug.c b/drivers/video/b2r2/b2r2_debug.c
new file mode 100644
index 00000000000..934ba938ee5
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_debug.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 dynamic debug
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "b2r2_debug.h"
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
+static struct dentry *log_lvl_dir;
+static int module_init;
+
+#define CHARS_IN_NODE_DUMP 1544
+#define DUMPED_NODE_SIZE (CHARS_IN_NODE_DUMP * sizeof(char) + 1)
+
+static void dump_node(char *dst, struct b2r2_node *node)
+{
+ dst += sprintf(dst, "node 0x%08x ------------------\n",
+ (unsigned int)node);
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_NIP:", node->node.GROUP0.B2R2_NIP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CIC:", node->node.GROUP0.B2R2_CIC);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_INS:", node->node.GROUP0.B2R2_INS);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_ACK:", node->node.GROUP0.B2R2_ACK);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TBA:", node->node.GROUP1.B2R2_TBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TTY:", node->node.GROUP1.B2R2_TTY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TXY:", node->node.GROUP1.B2R2_TXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TSZ:", node->node.GROUP1.B2R2_TSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1CF:", node->node.GROUP2.B2R2_S1CF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2CF:", node->node.GROUP2.B2R2_S2CF);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1BA:", node->node.GROUP3.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1TY:", node->node.GROUP3.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1XY:", node->node.GROUP3.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1SZ:", node->node.GROUP3.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2BA:", node->node.GROUP4.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2TY:", node->node.GROUP4.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2XY:", node->node.GROUP4.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2SZ:", node->node.GROUP4.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3BA:", node->node.GROUP5.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3TY:", node->node.GROUP5.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3XY:", node->node.GROUP5.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3SZ:", node->node.GROUP5.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CWO:", node->node.GROUP6.B2R2_CWO);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CWS:", node->node.GROUP6.B2R2_CWS);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CCO:", node->node.GROUP7.B2R2_CCO);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CML:", node->node.GROUP7.B2R2_CML);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_PMK:", node->node.GROUP8.B2R2_PMK);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FCTL:", node->node.GROUP8.B2R2_FCTL);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_RSF:", node->node.GROUP9.B2R2_RSF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_RZI:", node->node.GROUP9.B2R2_RZI);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_HFP:", node->node.GROUP9.B2R2_HFP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_VFP:", node->node.GROUP9.B2R2_VFP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_RSF:", node->node.GROUP10.B2R2_RSF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_RZI:", node->node.GROUP10.B2R2_RZI);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_HFP:", node->node.GROUP10.B2R2_HFP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_VFP:", node->node.GROUP10.B2R2_VFP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF0:", node->node.GROUP11.B2R2_FF0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF1:", node->node.GROUP11.B2R2_FF1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF2:", node->node.GROUP11.B2R2_FF2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF3:", node->node.GROUP11.B2R2_FF3);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_KEY1:", node->node.GROUP12.B2R2_KEY1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_KEY2:", node->node.GROUP12.B2R2_KEY2);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_XYL:", node->node.GROUP13.B2R2_XYL);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_XYP:", node->node.GROUP13.B2R2_XYP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_SAR:", node->node.GROUP14.B2R2_SAR);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_USR:", node->node.GROUP14.B2R2_USR);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX0:", node->node.GROUP15.B2R2_VMX0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX1:", node->node.GROUP15.B2R2_VMX1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX2:", node->node.GROUP15.B2R2_VMX2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX3:", node->node.GROUP15.B2R2_VMX3);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX0:", node->node.GROUP16.B2R2_VMX0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX1:", node->node.GROUP16.B2R2_VMX1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX2:", node->node.GROUP16.B2R2_VMX2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX3:", node->node.GROUP16.B2R2_VMX3);
+ dst += sprintf(dst, "--\n");
+
+}
+
+void b2r2_debug_job_done(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
+{
+ struct b2r2_node *node = first_node;
+ struct b2r2_node **dst_node;
+ unsigned int node_count = 0;
+
+ while (node != NULL) {
+ node_count++;
+ node = node->next;
+ }
+
+ mutex_lock(&cont->last_job_lock);
+
+ if (cont->last_job) {
+ node = cont->last_job;
+ while (node != NULL) {
+ struct b2r2_node *tmp = node->next;
+ kfree(node);
+ node = tmp;
+ }
+ cont->last_job = NULL;
+ }
+
+ node = first_node;
+ dst_node = &cont->last_job;
+ while (node != NULL) {
+ *dst_node = kzalloc(sizeof(**dst_node), GFP_KERNEL);
+ if (!(*dst_node))
+ goto last_job_alloc_failed;
+
+ memcpy(*dst_node, node, sizeof(**dst_node));
+
+ dst_node = &((*dst_node)->next);
+ node = node->next;
+ }
+
+ mutex_unlock(&cont->last_job_lock);
+
+ return;
+
+last_job_alloc_failed:
+ mutex_unlock(&cont->last_job_lock);
+
+ while (cont->last_job != NULL) {
+ struct b2r2_node *tmp = cont->last_job->next;
+ kfree(cont->last_job);
+ cont->last_job = tmp;
+ }
+
+ return;
+}
+
+static ssize_t last_job_read(struct file *filp, char __user *buf,
+ size_t bytes, loff_t *off)
+{
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
+ struct b2r2_node *node = cont->last_job;
+ int node_count = 0;
+ int i;
+
+ size_t size;
+ size_t count;
+ loff_t offs = *off;
+
+ for (; node != NULL; node = node->next)
+ node_count++;
+
+ size = node_count * DUMPED_NODE_SIZE;
+
+	if (node_count != cont->prev_node_count) {
+		kfree(cont->last_job_chars);
+
+		cont->last_job_chars = kzalloc(size, GFP_KERNEL);
+		if (!cont->last_job_chars) {
+			/* Reset the count so the buffer is reallocated
+			   on the next read */
+			cont->prev_node_count = 0;
+			return 0;
+		}
+		cont->prev_node_count = node_count;
+	}
+
+ mutex_lock(&cont->last_job_lock);
+ node = cont->last_job;
+ for (i = 0; i < node_count; i++) {
+ BUG_ON(node == NULL);
+ dump_node(cont->last_job_chars +
+ i * DUMPED_NODE_SIZE/sizeof(char),
+ node);
+ node = node->next;
+ }
+ mutex_unlock(&cont->last_job_lock);
+
+ if (offs > size)
+ return 0;
+
+ if (offs + bytes > size)
+ count = size - offs;
+ else
+ count = bytes;
+
+ if (copy_to_user(buf, cont->last_job_chars + offs, count))
+ return -EFAULT;
+
+ *off = offs + count;
+ return count;
+}
+
+static const struct file_operations last_job_fops = {
+ .read = last_job_read,
+};
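+
+/*
+ * A minimal usage sketch, assuming debugfs is mounted at
+ * /sys/kernel/debug and that debugfs_debug_root_dir lives under a
+ * "b2r2" directory (the exact path depends on the core driver):
+ *
+ *	cat /sys/kernel/debug/b2r2/debug/last_job
+ *
+ * dumps every register of each node in the last finished job as
+ * "B2R2_XXX:<tab>0x%08x" lines, with nodes separated by "--".
+ */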
+
+int b2r2_debug_init(struct b2r2_control *cont)
+{
+ int i;
+
+ if (!module_init) {
+ for (i = 0; i < B2R2_LOG_LEVEL_COUNT; i++)
+ b2r2_log_levels[i] = 0;
+
+#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
+ /*
+ * If dynamic debug is disabled we need some other way to
+ * control the log prints
+ */
+ log_lvl_dir = debugfs_create_dir("b2r2_log", NULL);
+
+ /* No need to save the files,
+ * they will be removed recursively */
+ if (!IS_ERR_OR_NULL(log_lvl_dir)) {
+ (void)debugfs_create_bool("warnings", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_WARN]);
+ (void)debugfs_create_bool("info", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_INFO]);
+ (void)debugfs_create_bool("debug", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]);
+ (void)debugfs_create_bool("regdumps", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]);
+ }
+
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+ /* log_lvl_dir is never used */
+ (void)log_lvl_dir;
+#endif
+ module_init++;
+ }
+
+ if (!IS_ERR_OR_NULL(cont->debugfs_debug_root_dir)) {
+ /* No need to save the file,
+ * it will be removed recursively */
+ (void)debugfs_create_file("last_job", 0444,
+ cont->debugfs_debug_root_dir, cont,
+ &last_job_fops);
+ }
+
+ mutex_init(&cont->last_job_lock);
+
+ return 0;
+}
+
+void b2r2_debug_exit(void)
+{
+#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
+ module_init--;
+ if (!module_init && !IS_ERR_OR_NULL(log_lvl_dir)) {
+ debugfs_remove_recursive(log_lvl_dir);
+ log_lvl_dir = NULL;
+ }
+#endif
+}
diff --git a/drivers/video/b2r2/b2r2_debug.h b/drivers/video/b2r2/b2r2_debug.h
new file mode 100644
index 00000000000..1b1ac83f6cb
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_debug.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 dynamic debug
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_
+
+#include <linux/device.h>
+
+#include "b2r2_internal.h"
+
+#ifdef CONFIG_B2R2_DEBUG
+
+/* Log macros */
+enum b2r2_log_levels {
+ B2R2_LOG_LEVEL_WARN,
+ B2R2_LOG_LEVEL_INFO,
+ B2R2_LOG_LEVEL_DEBUG,
+ B2R2_LOG_LEVEL_REGDUMP,
+ B2R2_LOG_LEVEL_COUNT,
+};
+
+/*
+ * Booleans controlling the different log levels. The different log levels are
+ * enabled separately (i.e. you can have info prints without the warn prints).
+ */
+extern int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
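+
+/*
+ * Sketch of how the levels are toggled when CONFIG_DYNAMIC_DEBUG is
+ * disabled, via the debugfs files created in b2r2_debug_init()
+ * (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *	echo 1 > /sys/kernel/debug/b2r2_log/info
+ *	echo 0 > /sys/kernel/debug/b2r2_log/warnings
+ */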
+
+#define b2r2_log_err(b2r2_log_dev, ...) do { \
+ dev_err(b2r2_log_dev, __VA_ARGS__); \
+ } while (0)
+
+/* If dynamic debug is enabled it should be used instead of loglevels */
+#ifdef CONFIG_DYNAMIC_DEBUG
+# define b2r2_log_warn(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "WARN " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_info(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "INFO " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_debug(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_regdump(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "REGD " __VA_ARGS__); \
+ } while (0)
+#else
+# define b2r2_log_warn(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_WARN]) \
+ dev_warn(b2r2_log_dev, "WARN " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_info(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_INFO]) \
+ dev_info(b2r2_log_dev, "INFO " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_debug(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]) \
+ dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_regdump(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]) \
+ dev_vdbg(b2r2_log_dev, "REGD " __VA_ARGS__); \
+ } while (0)
+#endif
+
+int b2r2_debug_init(struct b2r2_control *cont);
+void b2r2_debug_exit(void);
+void b2r2_debug_job_done(struct b2r2_control *cont,
+		struct b2r2_node *first_node);
+
+#else
+
+#define b2r2_log_err(...)
+#define b2r2_log_warn(...)
+#define b2r2_log_info(...)
+#define b2r2_log_debug(...)
+#define b2r2_log_regdump(...)
+
+static inline int b2r2_debug_init(struct b2r2_control *cont)
+{
+ return 0;
+}
+static inline void b2r2_debug_exit(void)
+{
+ return;
+}
+static inline void b2r2_debug_job_done(struct b2r2_control *cont,
+ struct b2r2_node *node)
+{
+ return;
+}
+
+#endif
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_filters.c b/drivers/video/b2r2/b2r2_filters.c
new file mode 100644
index 00000000000..a93eb60d91f
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_filters.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 filters.
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/dma-mapping.h>
+
+#include "b2r2_filters.h"
+#include "b2r2_internal.h"
+#include "b2r2_debug.h"
+
+/**
+ * struct b2r2_filter_spec filters[] - Filter lookup table
+ *
+ * Lookup table with filters for different scale factors, given in 6.10
+ * fixed point format (1 << 10 represents a 1:1 scale). A filter is
+ * selected according to "min < scale_factor <= max".
+ */
+static struct b2r2_filter_spec filters[] = {
+ {
+ .min = 1024,
+ .max = 1433,
+ .h_coeffs = {
+ 0x00, 0x00, 0xFA, 0x09, 0x34, 0x09, 0x00, 0x00,
+ 0x00, 0x00, 0xF9, 0x10, 0x32, 0x06, 0xFF, 0x00,
+ 0x00, 0x00, 0xF8, 0x17, 0x2F, 0x02, 0x00, 0x00,
+ 0x00, 0x00, 0xF7, 0x20, 0x2A, 0xFF, 0x00, 0x00,
+ 0x00, 0x00, 0xF8, 0x27, 0x25, 0xFC, 0x00, 0x00,
+ 0x00, 0x00, 0xFA, 0x2D, 0x1D, 0xFC, 0x00, 0x00,
+ 0x00, 0x00, 0xFE, 0x31, 0x15, 0xFC, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x35, 0x0D, 0xFC, 0x00, 0x00
+ },
+ .v_coeffs = {
+ 0x00, 0x02, 0x3C, 0x02, 0x00,
+ 0x00, 0x08, 0x3B, 0xFD, 0x00,
+ 0x00, 0x10, 0x35, 0xFB, 0x00,
+ 0x00, 0x18, 0x30, 0xF8, 0x00,
+ 0x00, 0x1F, 0x27, 0xFA, 0x00,
+ 0x00, 0x27, 0x1E, 0xFB, 0x00,
+ 0x00, 0x2E, 0x16, 0xFC, 0x00,
+ 0x00, 0x34, 0x0D, 0xFF, 0x00
+ },
+ },
+ {
+ .min = 1433,
+ .max = 1536,
+ .h_coeffs = {
+ 0xfe, 0x06, 0xf8, 0x0b, 0x30, 0x0b, 0xf8, 0x06,
+ 0xff, 0x06, 0xf7, 0x12, 0x2d, 0x05, 0xfa, 0x06,
+ 0x00, 0x04, 0xf6, 0x18, 0x2c, 0x00, 0xfc, 0x06,
+ 0x01, 0x02, 0xf7, 0x1f, 0x27, 0xfd, 0xff, 0x04,
+ 0x03, 0x00, 0xf9, 0x24, 0x24, 0xf9, 0x00, 0x03,
+ 0x04, 0xff, 0xfd, 0x29, 0x1d, 0xf7, 0x02, 0x01,
+ 0x06, 0xfc, 0x00, 0x2d, 0x17, 0xf6, 0x04, 0x00,
+ 0x06, 0xfa, 0x05, 0x30, 0x0f, 0xf7, 0x06, 0xff
+ },
+ .v_coeffs = {
+ 0xf6, 0x0e, 0x38, 0x0e, 0xf6,
+ 0xf5, 0x15, 0x38, 0x06, 0xf8,
+ 0xf5, 0x1d, 0x33, 0x00, 0xfb,
+ 0xf6, 0x23, 0x2d, 0xfc, 0xfe,
+ 0xf9, 0x28, 0x26, 0xf9, 0x00,
+ 0xfc, 0x2c, 0x1e, 0xf7, 0x03,
+ 0x00, 0x2e, 0x18, 0xf6, 0x04,
+ 0x05, 0x2e, 0x11, 0xf7, 0x05
+ },
+ },
+ {
+ .min = 1536,
+ .max = 3072,
+ .h_coeffs = {
+ 0xfc, 0xfd, 0x06, 0x13, 0x18, 0x13, 0x06, 0xfd,
+ 0xfc, 0xfe, 0x08, 0x15, 0x17, 0x12, 0x04, 0xfc,
+ 0xfb, 0xfe, 0x0a, 0x16, 0x18, 0x10, 0x03, 0xfc,
+ 0xfb, 0x00, 0x0b, 0x18, 0x17, 0x0f, 0x01, 0xfb,
+ 0xfb, 0x00, 0x0d, 0x19, 0x17, 0x0d, 0x00, 0xfb,
+ 0xfb, 0x01, 0x0f, 0x19, 0x16, 0x0b, 0x00, 0xfb,
+ 0xfc, 0x03, 0x11, 0x19, 0x15, 0x09, 0xfe, 0xfb,
+ 0xfc, 0x04, 0x12, 0x1a, 0x12, 0x08, 0xfe, 0xfc
+ },
+ .v_coeffs = {
+ 0x05, 0x10, 0x16, 0x10, 0x05,
+ 0x06, 0x11, 0x16, 0x0f, 0x04,
+ 0x08, 0x13, 0x15, 0x0e, 0x02,
+ 0x09, 0x14, 0x16, 0x0c, 0x01,
+ 0x0b, 0x15, 0x15, 0x0b, 0x00,
+ 0x0d, 0x16, 0x13, 0x0a, 0x00,
+ 0x0f, 0x17, 0x13, 0x08, 0xff,
+ 0x11, 0x18, 0x12, 0x07, 0xfe
+ },
+ },
+ {
+ .min = 3072,
+ .max = 4096,
+ .h_coeffs = {
+ 0xfe, 0x02, 0x09, 0x0f, 0x0e, 0x0f, 0x09, 0x02,
+ 0xff, 0x02, 0x09, 0x0f, 0x10, 0x0e, 0x08, 0x01,
+ 0xff, 0x03, 0x0a, 0x10, 0x10, 0x0d, 0x07, 0x00,
+ 0x00, 0x04, 0x0b, 0x10, 0x0f, 0x0c, 0x06, 0x00,
+ 0x00, 0x05, 0x0c, 0x10, 0x0e, 0x0c, 0x05, 0x00,
+ 0x00, 0x06, 0x0c, 0x11, 0x0e, 0x0b, 0x04, 0x00,
+ 0x00, 0x07, 0x0d, 0x11, 0x0f, 0x0a, 0x03, 0xff,
+ 0x01, 0x08, 0x0e, 0x11, 0x0e, 0x09, 0x02, 0xff
+ },
+ .v_coeffs = {
+ 0x09, 0x0f, 0x10, 0x0f, 0x09,
+ 0x09, 0x0f, 0x12, 0x0e, 0x08,
+ 0x0a, 0x10, 0x11, 0x0e, 0x07,
+ 0x0b, 0x11, 0x11, 0x0d, 0x06,
+ 0x0c, 0x11, 0x12, 0x0c, 0x05,
+ 0x0d, 0x12, 0x11, 0x0c, 0x04,
+ 0x0e, 0x12, 0x11, 0x0b, 0x04,
+ 0x0f, 0x13, 0x11, 0x0a, 0x03
+ },
+ },
+ {
+ .min = 4096,
+ .max = 5120,
+ .h_coeffs = {
+ 0x00, 0x04, 0x09, 0x0c, 0x0e, 0x0c, 0x09, 0x04,
+ 0x01, 0x05, 0x09, 0x0c, 0x0d, 0x0c, 0x08, 0x04,
+ 0x01, 0x05, 0x0a, 0x0c, 0x0e, 0x0b, 0x08, 0x03,
+ 0x02, 0x06, 0x0a, 0x0d, 0x0c, 0x0b, 0x07, 0x03,
+ 0x02, 0x07, 0x0a, 0x0d, 0x0d, 0x0a, 0x07, 0x02,
+ 0x03, 0x07, 0x0b, 0x0d, 0x0c, 0x0a, 0x06, 0x02,
+ 0x03, 0x08, 0x0b, 0x0d, 0x0d, 0x0a, 0x05, 0x01,
+ 0x04, 0x08, 0x0c, 0x0d, 0x0c, 0x09, 0x05, 0x01
+ },
+ .v_coeffs = {
+ 0x0a, 0x0e, 0x10, 0x0e, 0x0a,
+ 0x0b, 0x0e, 0x0f, 0x0e, 0x0a,
+ 0x0b, 0x0f, 0x10, 0x0d, 0x09,
+ 0x0c, 0x0f, 0x10, 0x0d, 0x08,
+ 0x0d, 0x0f, 0x0f, 0x0d, 0x08,
+ 0x0d, 0x10, 0x10, 0x0c, 0x07,
+ 0x0e, 0x10, 0x0f, 0x0c, 0x07,
+ 0x0f, 0x10, 0x10, 0x0b, 0x06
+ },
+ },
+};
+static const size_t filters_size = ARRAY_SIZE(filters);
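+
+/*
+ * Example of how the table is searched: scaling a 100 pixel wide source
+ * to 70 pixels gives scale_factor = (100 << 10) / 70 = 1462, so
+ * b2r2_filter_find() picks the second entry (1433 < 1462 <= 1536).
+ */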
+
+/**
+ * struct b2r2_filter_spec bilinear_filter - A bilinear filter
+ *
+ * The bilinear filter will be used if no custom filters are specified, or
+ * for upscales not matching any filter in the lookup table.
+ */
+static struct b2r2_filter_spec bilinear_filter = {
+ .min = 0,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0x08, 0x3e, 0xfb, 0x00, 0x00,
+ 0x00, 0x00, 0xfb, 0x13, 0x3b, 0xf7, 0x00, 0x00,
+ 0x00, 0x00, 0xf8, 0x1f, 0x34, 0xf5, 0x00, 0x00,
+ 0x00, 0x00, 0xf6, 0x2b, 0x2a, 0xf5, 0x00, 0x00,
+ 0x00, 0x00, 0xf6, 0x35, 0x1e, 0xf7, 0x00, 0x00,
+ 0x00, 0x00, 0xf9, 0x3c, 0x12, 0xf9, 0x00, 0x00,
+ 0x00, 0x00, 0xfd, 0x3f, 0x07, 0xfd, 0x00, 0x00
+ },
+ .v_coeffs = {
+ 0x00, 0x00, 0x40, 0x00, 0x00,
+ 0x00, 0x09, 0x3d, 0xfa, 0x00,
+ 0x00, 0x13, 0x39, 0xf4, 0x00,
+ 0x00, 0x1e, 0x31, 0xf1, 0x00,
+ 0x00, 0x27, 0x28, 0xf1, 0x00,
+ 0x00, 0x31, 0x1d, 0xf2, 0x00,
+ 0x00, 0x38, 0x12, 0xf6, 0x00,
+ 0x00, 0x3d, 0x07, 0xfc, 0x00
+ },
+};
+
+/**
+ * struct b2r2_filter_spec default_downscale_filter - Default filter for downscale
+ *
+ * The default downscale filter will be used for downscales not matching any
+ * filter in the lookup table.
+ */
+static struct b2r2_filter_spec default_downscale_filter = {
+ .min = 1 << 10,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x03, 0x06, 0x09, 0x0b, 0x09, 0x0b, 0x09, 0x06,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x04, 0x07, 0x09, 0x0b, 0x0b, 0x0a, 0x08, 0x04,
+ 0x04, 0x07, 0x0a, 0x0b, 0x0b, 0x0a, 0x07, 0x04,
+ 0x04, 0x08, 0x0a, 0x0b, 0x0b, 0x09, 0x07, 0x04,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03
+ },
+ .v_coeffs = {
+ 0x0b, 0x0e, 0x0e, 0x0e, 0x0b,
+ 0x0b, 0x0e, 0x0f, 0x0d, 0x0b,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0d, 0x0f, 0x0e, 0x0d, 0x09,
+ 0x0d, 0x0f, 0x0f, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0e, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0f, 0x0c, 0x08
+ },
+};
+
+/**
+ * struct b2r2_filter_spec blur_filter - Blur filter
+ *
+ * Filter for blurring an image.
+ */
+static struct b2r2_filter_spec blur_filter = {
+ .min = 0,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08
+ },
+ .v_coeffs = {
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c
+ },
+};
+
+/* Private function declarations */
+static int alloc_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter);
+static void free_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter);
+
+/* Public functions */
+
+int b2r2_filters_init(struct b2r2_control *cont)
+{
+ int i;
+
+ if (cont->filters_initialized)
+ return 0;
+
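+	/*
+	 * Allocation failures are deliberately not fatal here:
+	 * b2r2_filter_find() checks the DMA addresses and skips any
+	 * filter whose coefficients could not be allocated.
+	 */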
+	for (i = 0; i < filters_size; i++)
+		alloc_filter_coeffs(cont->dev, &filters[i]);
+
+ alloc_filter_coeffs(cont->dev, &bilinear_filter);
+ alloc_filter_coeffs(cont->dev, &default_downscale_filter);
+ alloc_filter_coeffs(cont->dev, &blur_filter);
+
+ cont->filters_initialized = 1;
+
+ return 0;
+}
+
+void b2r2_filters_exit(struct b2r2_control *cont)
+{
+ int i;
+
+ if (!cont->filters_initialized)
+ return;
+
+	for (i = 0; i < filters_size; i++)
+		free_filter_coeffs(cont->dev, &filters[i]);
+
+ free_filter_coeffs(cont->dev, &bilinear_filter);
+ free_filter_coeffs(cont->dev, &default_downscale_filter);
+ free_filter_coeffs(cont->dev, &blur_filter);
+
+ cont->filters_initialized = 0;
+}
+
+struct b2r2_filter_spec *b2r2_filter_find(u16 scale_factor)
+{
+ int i;
+ struct b2r2_filter_spec *filter = NULL;
+
+ for (i = 0; i < filters_size; i++) {
+ if ((filters[i].min < scale_factor) &&
+ (scale_factor <= filters[i].max) &&
+ filters[i].h_coeffs_dma_addr &&
+ filters[i].v_coeffs_dma_addr) {
+ filter = &filters[i];
+ break;
+ }
+ }
+
+	if (filter == NULL) {
+		/*
+		 * No suitable filter has been found; fall back to the
+		 * default filters, bilinear for any upscale.
+		 */
+ if (scale_factor < (1 << 10))
+ filter = &bilinear_filter;
+ else
+ filter = &default_downscale_filter;
+ }
+
+	/*
+	 * Check that the coefficients were successfully allocated for
+	 * this filter.
+	 */
+ if (!filter->h_coeffs_dma_addr || !filter->v_coeffs_dma_addr)
+ return NULL;
+ else
+ return filter;
+}
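+
+/*
+ * Typical use, as in the generic path: compute the scale factor in 6.10
+ * fixed point and look up a matching filter, e.g.
+ *
+ *	u32 h_scf = (src_rect->width << 10) / dst_rect->width;
+ *	struct b2r2_filter_spec *hf = b2r2_filter_find(h_scf);
+ *
+ * A NULL return means no filter with successfully allocated
+ * coefficients matched, and the caller runs the resizer unfiltered.
+ */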
+
+struct b2r2_filter_spec *b2r2_filter_blur(void)
+{
+ return &blur_filter;
+}
+
+/* Private functions */
+static int alloc_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter)
+{
+ int ret;
+
+ filter->h_coeffs_dma_addr = dma_alloc_coherent(dev,
+ B2R2_HF_TABLE_SIZE, &(filter->h_coeffs_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (filter->h_coeffs_dma_addr == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ filter->v_coeffs_dma_addr = dma_alloc_coherent(dev,
+ B2R2_VF_TABLE_SIZE, &(filter->v_coeffs_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (filter->v_coeffs_dma_addr == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memcpy(filter->h_coeffs_dma_addr, filter->h_coeffs,
+ B2R2_HF_TABLE_SIZE);
+ memcpy(filter->v_coeffs_dma_addr, filter->v_coeffs,
+ B2R2_VF_TABLE_SIZE);
+
+ return 0;
+
+error:
+ free_filter_coeffs(dev, filter);
+ return ret;
+}
+
+static void free_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter)
+{
+ if (filter->h_coeffs_dma_addr != NULL)
+ dma_free_coherent(dev, B2R2_HF_TABLE_SIZE,
+ filter->h_coeffs_dma_addr,
+ filter->h_coeffs_phys_addr);
+ if (filter->v_coeffs_dma_addr != NULL)
+ dma_free_coherent(dev, B2R2_VF_TABLE_SIZE,
+ filter->v_coeffs_dma_addr,
+ filter->v_coeffs_phys_addr);
+
+ filter->h_coeffs_dma_addr = NULL;
+ filter->h_coeffs_phys_addr = 0;
+ filter->v_coeffs_dma_addr = NULL;
+ filter->v_coeffs_phys_addr = 0;
+}
diff --git a/drivers/video/b2r2/b2r2_filters.h b/drivers/video/b2r2/b2r2_filters.h
new file mode 100644
index 00000000000..790c9ec8ee9
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_filters.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 filters.
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_FILTERS_H
+#define _LINUX_VIDEO_B2R2_FILTERS_H
+
+#include <linux/kernel.h>
+
+#include "b2r2_internal.h"
+
+#define B2R2_HF_TABLE_SIZE 64
+#define B2R2_VF_TABLE_SIZE 40
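+
+/*
+ * The sizes correspond to what is presumably 8 filter phases of 8
+ * horizontal taps (8 * 8 = 64 bytes) and 8 phases of 5 vertical taps
+ * (8 * 5 = 40 bytes), one coefficient byte per tap; the taps of each
+ * phase in b2r2_filters.c sum to roughly 0x40, i.e. unity gain.
+ */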
+
+/**
+ * @struct b2r2_filter_spec - Filter specification structure
+ *
+ * @param min - Minimum scale factor for this filter (in 6.10 fixed point)
+ * @param max - Maximum scale factor for this filter (in 6.10 fixed point)
+ * @param h_coeffs - Horizontal filter coefficients
+ * @param v_coeffs - Vertical filter coefficients
+ * @param h_coeffs_dma_addr - Kernel virtual address of the DMA-coherent
+ *                            horizontal coefficients
+ * @param v_coeffs_dma_addr - Kernel virtual address of the DMA-coherent
+ *                            vertical coefficients
+ * @param h_coeffs_phys_addr - Physical address for horizontal coefficients
+ * @param v_coeffs_phys_addr - Physical address for vertical coefficients
+ */
+struct b2r2_filter_spec {
+ const u16 min;
+ const u16 max;
+
+ const u8 h_coeffs[B2R2_HF_TABLE_SIZE];
+ const u8 v_coeffs[B2R2_VF_TABLE_SIZE];
+
+ void *h_coeffs_dma_addr;
+ u32 h_coeffs_phys_addr;
+
+ void *v_coeffs_dma_addr;
+ u32 v_coeffs_phys_addr;
+};
+
+/**
+ * b2r2_filters_init() - Initializes the B2R2 filters
+ */
+int b2r2_filters_init(struct b2r2_control *control);
+
+/**
+ * b2r2_filters_exit() - De-initializes the B2R2 filters
+ */
+void b2r2_filters_exit(struct b2r2_control *control);
+
+/**
+ * b2r2_filter_find() - Find a filter matching the given scale factor
+ *
+ * @param scale_factor - Scale factor to find a filter for
+ *
+ * Returns NULL if no filter could be found.
+ */
+struct b2r2_filter_spec *b2r2_filter_find(u16 scale_factor);
+
+/**
+ * b2r2_filter_blur() - Returns the blur filter
+ *
+ * Returns NULL if no blur filter is available.
+ */
+struct b2r2_filter_spec *b2r2_filter_blur(void);
+
+#endif /* _LINUX_VIDEO_B2R2_FILTERS_H */
diff --git a/drivers/video/b2r2/b2r2_generic.c b/drivers/video/b2r2/b2r2_generic.c
new file mode 100644
index 00000000000..4191e497e13
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_generic.c
@@ -0,0 +1,3206 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 generic path. Full coverage of the user interface,
+ * but a non-optimized implementation, intended as a fallback.
+ *
+ * Author: Maciej Socha <maciej.socha@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+
+#include "b2r2_generic.h"
+#include "b2r2_internal.h"
+#include "b2r2_global.h"
+#include "b2r2_debug.h"
+#include "b2r2_filters.h"
+#include "b2r2_hw_convert.h"
+
+/*
+ * Debug printing
+ */
+#define B2R2_GENERIC_DEBUG_AREAS 0
+#define B2R2_GENERIC_DEBUG
+
+#define B2R2_GENERIC_WORK_BUF_WIDTH 16
+#define B2R2_GENERIC_WORK_BUF_HEIGHT 16
+#define B2R2_GENERIC_WORK_BUF_PITCH (16 * 4)
+#define B2R2_GENERIC_WORK_BUF_FMT B2R2_NATIVE_ARGB8888
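+
+/*
+ * The generic path thus renders through 16x16 pixel intermediate tiles
+ * in ARGB8888, which is why the work buffer pitch is 16 * 4 bytes.
+ */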
+
+/*
+ * Private functions
+ */
+
+/**
+ * reset_nodes() - clears the node list
+ */
+static void reset_nodes(struct b2r2_control *cont,
+ struct b2r2_node *node)
+{
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ while (node != NULL) {
+ memset(&(node->node), 0, sizeof(node->node));
+
+ /* TODO: Implement support for short linked lists */
+ node->node.GROUP0.B2R2_CIC = 0x7fffc;
+
+ if (node->next == NULL)
+ break;
+
+ node->node.GROUP0.B2R2_NIP = node->next->physical_address;
+
+ node = node->next;
+ }
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/**
+ * dump_nodes() - prints the node list
+ */
+static void dump_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first, bool dump_all)
+{
+ struct b2r2_node *node = first;
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+ do {
+ b2r2_log_debug(cont->dev, "\nNODE START:\n=============\n");
+ b2r2_log_debug(cont->dev, "B2R2_ACK: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_ACK);
+ b2r2_log_debug(cont->dev, "B2R2_INS: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_INS);
+ b2r2_log_debug(cont->dev, "B2R2_CIC: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_CIC);
+ b2r2_log_debug(cont->dev, "B2R2_NIP: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_NIP);
+
+ b2r2_log_debug(cont->dev, "B2R2_TSZ: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TSZ);
+ b2r2_log_debug(cont->dev, "B2R2_TXY: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TXY);
+ b2r2_log_debug(cont->dev, "B2R2_TTY: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TTY);
+ b2r2_log_debug(cont->dev, "B2R2_TBA: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_S2CF: \t0x%.8x\n",
+ node->node.GROUP2.B2R2_S2CF);
+ b2r2_log_debug(cont->dev, "B2R2_S1CF: \t0x%.8x\n",
+ node->node.GROUP2.B2R2_S1CF);
+
+ b2r2_log_debug(cont->dev, "B2R2_S1SZ: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SSZ);
+ b2r2_log_debug(cont->dev, "B2R2_S1XY: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SXY);
+ b2r2_log_debug(cont->dev, "B2R2_S1TY: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_STY);
+ b2r2_log_debug(cont->dev, "B2R2_S1BA: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_S2SZ: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SSZ);
+ b2r2_log_debug(cont->dev, "B2R2_S2XY: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SXY);
+ b2r2_log_debug(cont->dev, "B2R2_S2TY: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_STY);
+ b2r2_log_debug(cont->dev, "B2R2_S2BA: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_S3SZ: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SSZ);
+ b2r2_log_debug(cont->dev, "B2R2_S3XY: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SXY);
+ b2r2_log_debug(cont->dev, "B2R2_S3TY: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_STY);
+ b2r2_log_debug(cont->dev, "B2R2_S3BA: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_CWS: \t0x%.8x\n",
+ node->node.GROUP6.B2R2_CWS);
+ b2r2_log_debug(cont->dev, "B2R2_CWO: \t0x%.8x\n",
+ node->node.GROUP6.B2R2_CWO);
+
+ b2r2_log_debug(cont->dev, "B2R2_FCTL: \t0x%.8x\n",
+ node->node.GROUP8.B2R2_FCTL);
+ b2r2_log_debug(cont->dev, "B2R2_RSF: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_RSF);
+ b2r2_log_debug(cont->dev, "B2R2_RZI: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_RZI);
+ b2r2_log_debug(cont->dev, "B2R2_HFP: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_HFP);
+ b2r2_log_debug(cont->dev, "B2R2_VFP: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_VFP);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_RSF: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_RSF);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_RZI: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_RZI);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_HFP: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_HFP);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_VFP: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_VFP);
+
+ b2r2_log_debug(cont->dev, "B2R2_IVMX0: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX0);
+ b2r2_log_debug(cont->dev, "B2R2_IVMX1: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX1);
+ b2r2_log_debug(cont->dev, "B2R2_IVMX2: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX2);
+ b2r2_log_debug(cont->dev, "B2R2_IVMX3: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX3);
+ b2r2_log_debug(cont->dev, "\n=============\nNODE END\n");
+
+ node = node->next;
+ } while (node != NULL && dump_all);
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/**
+ * to_native_fmt() - returns the native B2R2 format
+ */
+static inline enum b2r2_native_fmt to_native_fmt(struct b2r2_control *cont,
+ enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_UNUSED:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return B2R2_NATIVE_A1;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_NATIVE_A8;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return B2R2_NATIVE_ARGB4444;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return B2R2_NATIVE_ARGB1555;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ return B2R2_NATIVE_ARGB8565;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ return B2R2_NATIVE_RGB888;
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ return B2R2_NATIVE_YCBCR888;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Not actually supported by HW */
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ return B2R2_NATIVE_ARGB8888;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ return B2R2_NATIVE_AYCBCR8888;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return B2R2_NATIVE_YCBCR42X_R2B;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_NATIVE_YCBCR42X_MBN;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_NATIVE_YUV;
+ default:
+ /* Should never ever happen */
+ return B2R2_NATIVE_BYTE;
+ }
+}
+
+/**
+ * get_alpha_range() - returns the alpha range of the given format
+ */
+static inline enum b2r2_ty get_alpha_range(struct b2r2_control *cont,
+ enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+		return B2R2_TY_ALPHA_RANGE_255; /* 0 - 255 */
+ default:
+ break;
+ }
+
+ return B2R2_TY_ALPHA_RANGE_128; /* 0 - 128 */
+}
+
+static unsigned int get_pitch(struct b2r2_control *cont,
+ enum b2r2_blt_fmt format, u32 width)
+{
+ switch (format) {
+ case B2R2_BLT_FMT_1_BIT_A1: {
+ int pitch = width >> 3;
+ /* Check for remainder */
+ if (width & 7)
+ pitch++;
+		return pitch;
+ }
+ case B2R2_BLT_FMT_8_BIT_A8:
+		return width;
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* all 16 bits/pixel RGB formats */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+		return width * 2;
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* all 24 bits/pixel raster formats */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+		return width * 3;
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* all 32 bits/pixel formats */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+		return width * 4;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ /* width of the buffer must be a multiple of 4 */
+ if (width & 3) {
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+		return width * 2;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+		return width;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ /* width of the buffer must be a multiple of 2 */
+ if (width & 1) {
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+ /*
+ * return pitch of the Y-buffer.
+ * U and V pitch can be derived from it.
+ */
+ return width;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /* width of the buffer must be a multiple of 16. */
+ if (width & 15) {
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+ /*
+ * return pitch of the Y-buffer.
+ * U and V pitch can be derived from it.
+ */
+ return width;
+ default:
+ b2r2_log_warn(cont->dev, "%s: Unable to determine pitch "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+}
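+
+/*
+ * Worked examples: a 10 pixel wide B2R2_BLT_FMT_1_BIT_A1 line needs
+ * ceil(10 / 8) = 2 bytes, and a 100 pixel wide RGB565 line needs
+ * 100 * 2 = 200 bytes. For the multi-buffer YUV formats only the luma
+ * pitch is returned; the callers derive the chroma pitches from it.
+ */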
+
+static s32 validate_buf(struct b2r2_control *cont,
+ const struct b2r2_blt_img *image,
+ const struct b2r2_resolved_buf *buf)
+{
+ u32 expect_buf_size;
+ u32 pitch;
+
+ if (image->width <= 0 || image->height <= 0) {
+ b2r2_log_warn(cont->dev, "%s: width=%d or height=%d negative"
+ ".\n", __func__, image->width, image->height);
+ return -EINVAL;
+ }
+
+	if (image->pitch == 0) {
+		/* Auto-detect pitch based on format and width */
+		pitch = get_pitch(cont, image->fmt, image->width);
+	} else {
+		pitch = image->pitch;
+	}
+
+	if (pitch == 0) {
+		b2r2_log_warn(cont->dev, "%s: Unable to detect pitch. "
+			"fmt=%#010x, width=%d\n",
+			__func__,
+			image->fmt, image->width);
+		return -EINVAL;
+	}
+
+	expect_buf_size = pitch * image->height;
+
+ /* format specific adjustments */
+ switch (image->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ /*
+ * Use ceil(height/2) in case buffer height
+ * is not divisible by 2.
+ */
+ expect_buf_size +=
+ (pitch >> 1) * ((image->height + 1) >> 1) * 2;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ expect_buf_size += (pitch >> 1) * image->height * 2;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ expect_buf_size += pitch * image->height * 2;
+ break;
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ /*
+ * include space occupied by U and V data.
+ * U and V interleaved, half resolution, which makes
+ * the UV pitch equal to luma pitch.
+ * Use ceil(height/2) in case buffer height
+ * is not divisible by 2.
+ */
+ expect_buf_size += pitch * ((image->height + 1) >> 1);
+ break;
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ /*
+ * include space occupied by U and V data.
+ * U and V interleaved, half resolution, which makes
+ * the UV pitch equal to luma pitch.
+ */
+ expect_buf_size += pitch * image->height;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /* Height must be a multiple of 16 for macro-block format.*/
+ if (image->height & 15) {
+ b2r2_log_warn(cont->dev, "%s: Illegal height "
+ "for fmt=%#010x height=%d\n", __func__,
+ image->fmt, image->height);
+ return -EINVAL;
+ }
+ expect_buf_size += pitch * (image->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /* Height must be a multiple of 16 for macro-block format.*/
+ if (image->height & 15) {
+ b2r2_log_warn(cont->dev, "%s: Illegal height "
+ "for fmt=%#010x height=%d\n", __func__,
+ image->fmt, image->height);
+ return -EINVAL;
+ }
+ expect_buf_size += pitch * image->height;
+ break;
+ default:
+ break;
+ }
+
+ if (buf->file_len < expect_buf_size) {
+ b2r2_log_warn(cont->dev, "%s: Invalid buffer size:\n"
+ "fmt=%#010x w=%d h=%d buf.len=%d expect_buf_size=%d\n",
+ __func__,
+ image->fmt, image->width, image->height, buf->file_len,
+ expect_buf_size);
+ return -EINVAL;
+ }
+
+ if (image->buf.type == B2R2_BLT_PTR_VIRTUAL) {
+ b2r2_log_warn(cont->dev, "%s: Virtual pointers not supported"
+ " yet.\n", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
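+
+/*
+ * Example of the size check above: a 64x48
+ * B2R2_BLT_FMT_YUV420_PACKED_PLANAR image with auto-detected pitch
+ * needs 64 * 48 = 3072 bytes of luma plus two (64/2) * (48/2) = 768
+ * byte chroma planes, so file_len must be at least 4608 bytes.
+ */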
+
+/*
+ * Bit-expand the color from fmt to RGB888 with blue at LSB.
+ * Copy MSBs into missing LSBs.
+ */
+static u32 to_RGB888(struct b2r2_control *cont, u32 color,
+ const enum b2r2_blt_fmt fmt)
+{
+ u32 out_color = 0;
+ u32 r = 0;
+ u32 g = 0;
+ u32 b = 0;
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ r = ((color & 0xf00) << 12) | ((color & 0xf00) << 8);
+ g = ((color & 0xf0) << 8) | ((color & 0xf0) << 4);
+ b = ((color & 0xf) << 4) | (color & 0xf);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ r = ((color & 0x7c00) << 9) | ((color & 0x7000) << 4);
+ g = ((color & 0x3e0) << 6) | ((color & 0x380) << 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ out_color = color & 0xffffff;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ r = (color & 0xff) << 16;
+ g = color & 0xff00;
+ b = (color & 0xff0000) >> 16;
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ default:
+ break;
+ }
+
+ return out_color;
+}
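+
+/*
+ * Example: RGB565 pure red, 0xf800, expands to 0x00ff0000; the five red
+ * MSBs land in bits 19-23 and the top three of them are replicated into
+ * bits 16-18, so full intensity maps to full intensity.
+ */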
+
+
+static void setup_fill_input_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ enum b2r2_native_fmt fill_fmt = 0;
+ u32 src_color = req->user_req.src_color;
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ struct b2r2_control *cont = req->instance->control;
+ bool fullrange = (req->user_req.flags &
+ B2R2_BLT_FLAG_FULL_RANGE_YUV) != 0;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ /* Determine format in src_color */
+ switch (dst_img->fmt) {
+ /* ARGB formats */
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL) != 0) {
+ fill_fmt = B2R2_NATIVE_ARGB8888;
+ } else {
+ /* SOURCE_FILL_RAW */
+ fill_fmt = to_native_fmt(cont, dst_img->fmt);
+ if (dst_img->fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) {
+ /*
+ * Color is read from a register,
+ * where it is stored in ABGR format.
+ * Set up IVMX.
+ */
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_BGR);
+ }
+ }
+ break;
+ /* YUV formats */
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL) != 0) {
+ fill_fmt = B2R2_NATIVE_AYCBCR8888;
+ /*
+ * Set up IVMX
+ * The destination format is in fact YUV,
+ * but the input stage stores the data in
+ * an intermediate buffer which is RGB.
+ * Hence the conversion from YUV to RGB.
+ * Format of the supplied src_color is
+ * B2R2_BLT_FMT_32_BIT_AYUV8888.
+ */
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_BLT_YUV888_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_BLT_YUV888_TO_RGB);
+ } else {
+ /* SOURCE_FILL_RAW */
+ bool dst_yuv_planar =
+ B2R2_BLT_FMT_YUV420_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV444_PACKED_PLANAR ==
+ dst_img->fmt;
+
+ bool dst_yuv_semi_planar =
+ B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE ==
+ dst_img->fmt;
+
+ if (dst_yuv_planar || dst_yuv_semi_planar) {
+ /*
+ * SOURCE_FILL_RAW cannot be supported
+ * with multi-buffer formats.
+ * Force a legal format to prevent B2R2
+ * from misbehaving.
+ */
+ fill_fmt = B2R2_NATIVE_AYCBCR8888;
+ } else {
+ fill_fmt = to_native_fmt(cont, dst_img->fmt);
+ }
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (fullrange)
+ b2r2_setup_ivmx(node,
+ B2R2_CC_BLT_YUV888_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node,
+ B2R2_CC_BLT_YUV888_TO_RGB);
+ /*
+ * Re-arrange the color components from
+ * VUY(A) to (A)YUV
+ */
+ if (dst_img->fmt ==
+ B2R2_BLT_FMT_24_BIT_VUY888) {
+ u32 Y = src_color & 0xff;
+ u32 U = src_color & 0xff00;
+ u32 V = src_color & 0xff0000;
+ src_color = (Y << 16) | U | (V >> 16);
+ } else if (dst_img->fmt ==
+ B2R2_BLT_FMT_32_BIT_VUYA8888) {
+ u32 A = src_color & 0xff;
+ u32 Y = src_color & 0xff00;
+ u32 U = src_color & 0xff0000;
+ u32 V = src_color & 0xff000000;
+ src_color = (A << 24) |
+ (Y << 8) |
+ (U >> 8) |
+ (V >> 24);
+ }
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ /*
+ * Setup input VMX to convert YVU to
+ * RGB 601 VIDEO
+ * Chroma components are swapped so
+ * it is YVU and not YUV.
+ */
+ if (fullrange)
+ b2r2_setup_ivmx(node,
+ B2R2_CC_YVU_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_YVU_TO_RGB);
+ break;
+ default:
+ /*
+ * Set up IVMX
+ * The destination format is in fact YUV,
+ * but the input stage stores the data in
+ * an intermediate buffer which is RGB.
+ * Hence the conversion from YUV to RGB.
+ */
+ if (fullrange)
+ b2r2_setup_ivmx(node,
+ B2R2_CC_YUV_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_YUV_TO_RGB);
+ break;
+ }
+ }
+ break;
+ default:
+ src_color = 0;
+ fill_fmt = B2R2_NATIVE_ARGB8888;
+ break;
+ }
+
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+ /* Set color fill on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = 0;
+ node->node.GROUP4.B2R2_STY =
+ (0 << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ fill_fmt |
+ get_alpha_range(cont, dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_FILL;
+ node->node.GROUP2.B2R2_S2CF = src_color;
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+static void setup_input_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ /* Horizontal and vertical scaling factors in 6.10 fixed point format */
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ const struct b2r2_blt_rect *src_rect = &(req->user_req.src_rect);
+ const struct b2r2_blt_rect *dst_rect = &(req->user_req.dst_rect);
+ const struct b2r2_blt_img *src_img = &(req->user_req.src_img);
+ u32 src_pitch = 0;
+ /* horizontal and vertical scan order for out_buf */
+ enum b2r2_ty dst_hso = B2R2_TY_HSO_LEFT_TO_RIGHT;
+ enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+ u32 endianness = 0;
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 rzi = 0;
+ bool yuv_semi_planar =
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv_planar =
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ struct b2r2_filter_spec *hf;
+ struct b2r2_filter_spec *vf;
+
+ bool use_h_filter = false;
+ bool use_v_filter = false;
+
+ struct b2r2_control *cont = req->instance->control;
+ bool fullrange = (req->user_req.flags &
+ B2R2_BLT_FLAG_FULL_RANGE_YUV) != 0;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (((B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW) &
+ req->user_req.flags) != 0) {
+ setup_fill_input_stage(req, node, out_buf);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+ return;
+ }
+
+ if (src_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ src_pitch = get_pitch(cont, src_img->fmt, src_img->width);
+ } else {
+ src_pitch = src_img->pitch;
+ }
+
+ b2r2_log_info(cont->dev, "%s transform=%#010x\n",
+ __func__, req->user_req.transform);
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect->width << 10) / dst_rect->height;
+ v_scf = (src_rect->height << 10) / dst_rect->width;
+ } else {
+ h_scf = (src_rect->width << 10) / dst_rect->width;
+ v_scf = (src_rect->height << 10) / dst_rect->height;
+ }
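+	/*
+	 * E.g. blitting a 128 pixel wide source rect onto a 64 pixel wide
+	 * destination (no rotation) gives h_scf = (128 << 10) / 64 =
+	 * 0x800, i.e. a 2x downscale in 6.10 fixed point.
+	 */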
+
+ hf = b2r2_filter_find(h_scf);
+ vf = b2r2_filter_find(v_scf);
+
+ use_h_filter = h_scf != (1 << 10);
+ use_v_filter = v_scf != (1 << 10);
+
+ /* B2R2_BLT_FLAG_BLUR overrides any scaling filter. */
+ if (req->user_req.flags & B2R2_BLT_FLAG_BLUR) {
+ use_h_filter = true;
+ use_v_filter = true;
+ hf = b2r2_filter_blur();
+ vf = b2r2_filter_blur();
+ }
+
+	/*
+	 * Configure horizontal rescale. The resizer itself is enabled
+	 * unconditionally in the generic path; only the log below is
+	 * gated on an actual scale factor change.
+	 */
+ if (h_scf != (1 << 10)) {
+ b2r2_log_info(cont->dev, "%s: Scaling horizontally by 0x%.8x"
+ "\ns(%d, %d)->d(%d, %d)\n", __func__,
+ h_scf, src_rect->width, src_rect->height,
+ dst_rect->width, dst_rect->height);
+ }
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rzi |= B2R2_RZI_DEFAULT_HNB_REPEAT;
+
+	/*
+	 * Configure vertical rescale; as above, the resizer is always
+	 * enabled and only the log is conditional.
+	 */
+ if (v_scf != (1 << 10)) {
+ b2r2_log_info(cont->dev, "%s: Scaling vertically by 0x%.8x"
+ "\ns(%d, %d)->d(%d, %d)\n", __func__,
+ v_scf, src_rect->width, src_rect->height,
+ dst_rect->width, dst_rect->height);
+ }
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ rzi |= 2 << B2R2_RZI_VNB_REPEAT_SHIFT;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_CHROMA;
+
+ /* Adjustments that depend on the source format */
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_BGR);
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ /*
+ * Setup input VMX to convert YVU to RGB 601 VIDEO
+ * Chroma components are swapped so
+ * it is YVU and not YUV.
+ */
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_YVU_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_YVU_TO_RGB);
+ break;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_YUV_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_YUV_TO_RGB);
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ /*
+ * Set up IVMX.
+	 * For B2R2_BLT_FMT_24_BIT_YUV888 and
+	 * B2R2_BLT_FMT_32_BIT_AYUV8888
+ * the color components are laid out in memory as V, U, Y, (A)
+ * with V at the first byte (due to little endian addressing).
+ * B2R2 expects them to be as U, Y, V, (A)
+ * with U at the first byte.
+ */
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_BLT_YUV888_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_BLT_YUV888_TO_RGB);
+
+	/*
+	 * The VUY(A) layouts are instead read big-endian, which
+	 * re-arranges the components to (A)YUV before the input VMX.
+	 */
+ if (src_img->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ src_img->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: {
+ /*
+ * Luma handled in the same way
+ * for all YUV multi-buffer formats.
+ * Set luma rescale registers.
+ */
+ u32 rsf_luma = 0;
+ u32 rzi_luma = 0;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_LUMA;
+
+ if (src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_img->fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_YVU_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_YVU_TO_RGB);
+ } else {
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_YUV_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_YUV_TO_RGB);
+ }
+
+ fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+
+ if (use_h_filter && hf) {
+ fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER;
+ node->node.GROUP10.B2R2_HFP = hf->h_coeffs_phys_addr;
+ }
+
+ if (use_v_filter && vf) {
+ fctl |= B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER;
+ node->node.GROUP10.B2R2_VFP = vf->v_coeffs_phys_addr;
+ }
+
+ rsf_luma |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rzi_luma |= B2R2_RZI_DEFAULT_HNB_REPEAT;
+
+ rsf_luma |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ rzi_luma |= 2 << B2R2_RZI_VNB_REPEAT_SHIFT;
+
+ node->node.GROUP10.B2R2_RSF = rsf_luma;
+ node->node.GROUP10.B2R2_RZI = rzi_luma;
+
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (h_scf >> 1) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (v_scf >> 1) << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf >> 1);
+ vf = b2r2_filter_find(v_scf >> 1);
+ use_h_filter = true;
+ use_v_filter = true;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * only in horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (h_scf >> 1) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf >> 1);
+ use_h_filter = true;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /* Chrominance is the same size as luminance.*/
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf);
+ vf = b2r2_filter_find(v_scf);
+ use_h_filter = true;
+ use_v_filter = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ /*
+ * Set the filter control and rescale registers.
+ * GROUP9 registers are used for all single-buffer formats
+ * or for chroma in case of multi-buffer YUV formats.
+ * h/v_filter is now appropriately selected for chroma scaling,
+ * be it YUV multi-buffer, or single-buffer raster format.
+ * B2R2_BLT_FLAG_BLUR overrides any scaling filter.
+ */
+ if (req->user_req.flags & B2R2_BLT_FLAG_BLUR) {
+ use_h_filter = true;
+ use_v_filter = true;
+ hf = b2r2_filter_blur();
+ vf = b2r2_filter_blur();
+ }
+
+ if (use_h_filter && hf) {
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ node->node.GROUP9.B2R2_HFP = hf->h_coeffs_phys_addr;
+ }
+
+ if (use_v_filter && vf) {
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ node->node.GROUP9.B2R2_VFP = vf->v_coeffs_phys_addr;
+ }
+
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+ node->node.GROUP9.B2R2_RSF |= rsf;
+ node->node.GROUP9.B2R2_RZI |= rzi;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_FILTER_CONTROL;
+
+ /*
+ * Flip transform is done before potential rotation.
+ * This can be achieved with appropriate scan order.
+ * Transform stage will only do rotation.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ dst_hso = B2R2_TY_HSO_RIGHT_TO_LEFT;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ dst_vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ dst_hso | dst_vso;
+
+ if (yuv_planar) {
+ /*
+ * Set up chrominance buffers on source 1 and 2,
+ * luminance on source 3.
+ * src_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ */
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt src_fmt =
+ to_native_fmt(cont, src_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+ else
+ cb_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ chroma_pitch = src_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (src_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (src_img->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ chroma_pitch = src_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ src_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ src_img->height;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /* Chrominance has full resolution, same as luminance.*/
+ chroma_pitch = src_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * src_img->height;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP3.B2R2_SBA = cr_addr;
+ node->node.GROUP3.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP4.B2R2_SBA = cb_addr;
+ node->node.GROUP4.B2R2_STY = node->node.GROUP3.B2R2_STY;
+
+ node->node.GROUP5.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_SOURCE_3;
+ } else if (yuv_semi_planar) {
+ /*
+ * Set up chrominance buffer on source 2, luminance on source 3.
+ * src_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ * U and V are interleaved at half the luminance resolution,
+ * which makes the pitch of the UV plane equal
+ * to luminance pitch.
+ */
+ u32 chroma_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+ u32 chroma_pitch = src_pitch;
+
+ enum b2r2_native_fmt src_fmt =
+ to_native_fmt(cont, src_img->fmt);
+
+ node->node.GROUP4.B2R2_SBA = chroma_addr;
+ node->node.GROUP4.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP5.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_SOURCE_3;
+ } else {
+ /* single buffer format */
+ node->node.GROUP4.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP4.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(cont, src_img->fmt) |
+ get_alpha_range(cont, src_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ endianness;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ }
+
+ if ((req->user_req.flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CLUTOP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLUT;
+ node->node.GROUP7.B2R2_CCO = B2R2_CCO_CLUT_COLOR_CORRECTION |
+ B2R2_CCO_CLUT_UPDATE;
+ node->node.GROUP7.B2R2_CML = req->clut_phys_addr;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+static void setup_transform_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf,
+ struct b2r2_work_buf *in_buf)
+{
+ /* vertical scan order for out_buf */
+ enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+ enum b2r2_blt_transform transform = req->user_req.transform;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * Scan order must be flipped otherwise contents will
+ * be mirrored vertically. Leftmost column of in_buf
+ * would become top instead of bottom row of out_buf.
+ */
+ dst_vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_ROTATION_ENABLED;
+ }
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT | dst_vso;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/*
+static void setup_mask_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf,
+ struct b2r2_work_buf *in_buf);
+*/
+
+static void setup_dst_read_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 endianness = 0;
+ bool yuv_semi_planar =
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv_planar =
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ u32 dst_pitch = 0;
+ struct b2r2_control *cont = req->instance->control;
+ bool fullrange = (req->user_req.flags &
+ B2R2_BLT_FLAG_FULL_RANGE_YUV) != 0;
+
+ if (dst_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ dst_pitch = get_pitch(cont, dst_img->fmt, dst_img->width);
+ } else {
+ dst_pitch = dst_img->pitch;
+ }
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ /* Adjustments that depend on the destination format */
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_BGR);
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+		/*
+		 * Set up the input VMX to convert YVU to RGB 601 VIDEO.
+		 * The chroma components are swapped,
+		 * so the format is YVU and not YUV.
+		 */
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_YVU_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_YVU_TO_RGB);
+ break;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_YUV_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_YUV_TO_RGB);
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+		/*
+		 * Set up IVMX.
+		 * For B2R2_BLT_FMT_24_BIT_YUV888 and
+		 * B2R2_BLT_FMT_32_BIT_AYUV8888
+		 * the color components are laid out in memory as V, U, Y, (A)
+		 * with V at the first byte (due to little endian addressing).
+		 * B2R2 expects them to be as U, Y, V, (A)
+		 * with U at the first byte.
+		 */
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_BLT_YUV888_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_BLT_YUV888_TO_RGB);
+
+ /*
+ * Re-arrange color components from VUY(A) to (A)YUV
+ * for input VMX to work on them further.
+ */
+ if (dst_img->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_img->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: {
+ if (dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_YVU_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_YVU_TO_RGB);
+ } else {
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_YUV_FULL_TO_RGB);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_YUV_TO_RGB);
+ }
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
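+			/*
+			 * The resize increments below are in 6.10 fixed
+			 * point, so (1 << 9) == 0.5: each output pixel steps
+			 * half a chroma pixel, i.e. the half-size chroma
+			 * planes are upsampled to luma resolution.
+			 */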
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * only in horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
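+			/*
+			 * Again in 6.10 fixed point: (1 << 9) == 0.5
+			 * upsamples the horizontally subsampled chroma,
+			 * while (1 << 10) == 1.0 leaves the vertical
+			 * direction unscaled.
+			 */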
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /* Chrominance is the same size as luminance.*/
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ default:
+ break;
+ }
+ /* Set the filter control and rescale registers for chroma */
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+ node->node.GROUP9.B2R2_RSF |= rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL | B2R2_CIC_RESIZE_CHROMA;
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ if (yuv_planar) {
+ /*
+ * Set up chrominance buffers on source 1 and 2,
+ * luminance on source 3.
+ * dst_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ */
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ else
+ cb_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ dst_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ dst_img->height;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ chroma_pitch = dst_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * dst_img->height;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP3.B2R2_SBA = cr_addr;
+ node->node.GROUP3.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP4.B2R2_SBA = cb_addr;
+ node->node.GROUP4.B2R2_STY = node->node.GROUP3.B2R2_STY;
+
+ node->node.GROUP5.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_SOURCE_3;
+ } else if (yuv_semi_planar) {
+ /*
+ * Set up chrominance buffer on source 2, luminance on source 3.
+ * dst_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ * U and V are interleaved at half the luminance resolution,
+ * which makes the pitch of the UV plane equal
+ * to luminance pitch.
+ */
+ u32 chroma_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ u32 chroma_pitch = dst_pitch;
+
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+
+ node->node.GROUP4.B2R2_SBA = chroma_addr;
+ node->node.GROUP4.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP5.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_SOURCE_3;
+ } else {
+ /* single buffer format */
+ node->node.GROUP4.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP4.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(cont, dst_img->fmt) |
+ get_alpha_range(cont, dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ endianness;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+static void setup_blend_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *bg_buf,
+ struct b2r2_work_buf *fg_buf)
+{
+ u32 global_alpha = req->user_req.global_alpha;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ node->node.GROUP0.B2R2_ACK = 0;
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND |
+ B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)) {
+ /* Some kind of blending needs to be done. */
+ if (req->user_req.flags & B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT)
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_MODE_BLEND_NOT_PREMULT;
+ else
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_MODE_BLEND_PREMULT;
+
+ /*
+ * global_alpha register accepts 0..128 range,
+ * global_alpha in the request is 0..255, remap needed.
+ */
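+		/*
+		 * The remap below maps 255 to 128 exactly and halves all
+		 * other values, e.g. 254 -> 127 (the exact value would be
+		 * 127.5), keeping the error within half a step.
+		 */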
+ if (req->user_req.flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) {
+ if (global_alpha == 255)
+ global_alpha = 128;
+ else
+ global_alpha >>= 1;
+ } else {
+ /*
+ * Use solid global_alpha
+ * if global alpha blending is not set.
+ */
+ global_alpha = 128;
+ }
+
+ node->node.GROUP0.B2R2_ACK |=
+ global_alpha << (B2R2_ACK_GALPHA_ROPID_SHIFT);
+
+ /* Set background on SRC1 channel */
+ node->node.GROUP3.B2R2_SBA = bg_buf->phys_addr;
+ node->node.GROUP3.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /* Set foreground on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = fg_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = bg_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2;
+ } else {
+ /*
+ * No blending, foreground goes on SRC2. No global alpha.
+ * EMACSOC TODO: The blending stage should be skipped altogether
+ * if no blending is to be done. Probably could go directly from
+ * transform to writeback.
+ */
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+
+ node->node.GROUP4.B2R2_SBA = fg_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP1.B2R2_TBA = bg_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+ }
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+static void setup_writeback_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *in_buf)
+{
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ const enum b2r2_blt_fmt dst_fmt = dst_img->fmt;
+ const bool yuv_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ const bool yuv_semi_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ const u32 group4_b2r2_sty =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ u32 dst_dither = 0;
+ u32 dst_pitch = 0;
+ u32 endianness = 0;
+
+ struct b2r2_control *cont = req->instance->control;
+ bool fullrange = (req->user_req.flags &
+ B2R2_BLT_FLAG_FULL_RANGE_YUV) != 0;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (dst_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ dst_pitch = get_pitch(cont, dst_img->fmt, dst_img->width);
+	} else {
+		dst_pitch = dst_img->pitch;
+	}
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_DITHER) != 0)
+ dst_dither = B2R2_TTY_RGB_ROUND_DITHER;
+
+ /* Set target buffer(s) */
+ if (yuv_planar_dst) {
+ /*
+		 * Three nodes are required to write the output:
+		 * luma, blue chroma and red chroma.
+ */
+ u32 fctl = 0;
+ u32 rsf = 0;
+ const u32 group0_b2r2_ins =
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED;
+ const u32 group0_b2r2_cic =
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_CLIP_WINDOW;
+
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+ enum b2r2_ty alpha_range = get_alpha_range(cont, dst_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ else
+ cb_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
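+			/*
+			 * On writeback the direction is reversed: a
+			 * (2 << 10) == 2.0 increment steps two work-buffer
+			 * pixels per chroma pixel written, downsampling to
+			 * the half-size chroma planes.
+			 */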
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ dst_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ dst_img->height;
+ /*
+ * YUV422 or YVU422
+ * Chrominance is always half the luminance size
+ * only in horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ chroma_pitch = dst_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * dst_img->height;
+ /*
+ * No scaling required since
+ * chrominance is not subsampled.
+ */
+			break;
+ default:
+ break;
+ }
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither;
+
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV_FULL);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV);
+
+ /* bypass ALU, no blending here. Handled in its own stage. */
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+ /* Blue chroma (U-component)*/
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = cb_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV_FULL);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV);
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+ if (dst_fmt != B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+ /*
+ * Red chroma (V-component)
+ * The flag B2R2_TTY_CB_NOT_CR actually works
+ * the other way around, i.e. as if it was
+ * CR_NOT_CB.
+ */
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = cr_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TTY_CB_NOT_CR |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV_FULL);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV);
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+ if (dst_fmt != B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ } else if (yuv_semi_planar_dst) {
+ /*
+		 * Two nodes are required to write the output:
+		 * one for luma and one for the interleaved chroma
+		 * components.
+ */
+ u32 fctl = 0;
+ u32 rsf = 0;
+ const u32 group0_b2r2_ins =
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED;
+ const u32 group0_b2r2_cic =
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_CLIP_WINDOW;
+
+ u32 chroma_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ u32 chroma_pitch = dst_pitch;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+ enum b2r2_ty alpha_range = get_alpha_range(cont, dst_img->fmt);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR) {
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
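+			/*
+			 * (2 << 10) == 2.0 in 6.10 fixed point: downsample
+			 * the work buffer 2x in both directions for the
+			 * interleaved chroma plane.
+			 */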
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ } else {
+ /*
+ * YUV422
+ * Chrominance is always half the luminance size
+ * only in horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ }
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither;
+
+ if (dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YVU_FULL);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YVU);
+ } else {
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV_FULL);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV);
+ }
+
+ /* bypass ALU, no blending here. Handled in its own stage. */
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+ /* Chroma (UV-components)*/
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = chroma_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ if (dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YVU_FULL);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YVU);
+ } else {
+ if (fullrange)
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV_FULL);
+ else
+ b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV);
+ }
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS =
+ group0_b2r2_ins | B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic |
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ } else {
+ /* single buffer target */
+
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ b2r2_setup_ovmx(node, B2R2_CC_RGB_TO_BGR);
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ if (fullrange)
+ b2r2_setup_ovmx(node, B2R2_CC_RGB_TO_YVU_FULL);
+ else
+ b2r2_setup_ovmx(node, B2R2_CC_RGB_TO_YVU);
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888: /* fall through */
+ case B2R2_BLT_FMT_24_BIT_VUY888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (fullrange)
+ b2r2_setup_ovmx(node, B2R2_CC_RGB_TO_BLT_YUV888_FULL);
+ else
+ b2r2_setup_ovmx(node, B2R2_CC_RGB_TO_BLT_YUV888);
+ /*
+ * Re-arrange color components from (A)YUV to VUY(A)
+ * when bytes are stored in memory.
+ */
+ if (dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(cont, dst_img->fmt) |
+ get_alpha_range(cont, dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ endianness;
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_CLIP_WINDOW;
+
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) {
+ u32 key_color = 0;
+
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT |
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CKEY_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_KEY;
+
+ key_color = to_RGB888(cont, req->user_req.src_color,
+ req->user_req.src_img.fmt);
+ node->node.GROUP12.B2R2_KEY1 = key_color;
+ node->node.GROUP12.B2R2_KEY2 = key_color;
+ }
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ }
+ /*
+ * Writeback is the last stage. Terminate the program chain
+ * to prevent out-of-control B2R2 execution.
+ */
+ node->node.GROUP0.B2R2_NIP = 0;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/*
+ * Public functions
+ */
+void b2r2_generic_init(struct b2r2_control *cont)
+{
+}
+
+void b2r2_generic_exit(struct b2r2_control *cont)
+{
+}
+
+int b2r2_generic_analyze(const struct b2r2_blt_request *req,
+ s32 *work_buf_width,
+ s32 *work_buf_height,
+ u32 *work_buf_count,
+ u32 *node_count)
+{
+	/*
+	 * Need at least 4 nodes: read or fill input, read dst, blend
+	 * and write back the result.
+	 */
+ u32 n_nodes = 4;
+ /* Need at least 2 bufs, 1 for blend output and 1 for input */
+ u32 n_work_bufs = 2;
+ /* Horizontal and vertical scaling factors in 6.10 fixed point format */
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ enum b2r2_blt_fmt dst_fmt = 0;
+ bool is_src_fill = false;
+ bool yuv_planar_dst;
+ bool yuv_semi_planar_dst;
+ struct b2r2_blt_rect src_rect;
+ struct b2r2_blt_rect dst_rect;
+	struct b2r2_control *cont;
+
+	if (req == NULL) {
+		pr_warn("%s: Invalid request pointer.\n", __func__);
+		return -EINVAL;
+	}
+	cont = req->instance->control;
+
+	if (work_buf_width == NULL || work_buf_height == NULL ||
+			work_buf_count == NULL || node_count == NULL) {
+		b2r2_log_warn(cont->dev, "%s: Invalid out pointers:\n"
+			"work_buf_width=0x%p work_buf_height=0x%p "
+			"work_buf_count=0x%p node_count=0x%p.\n",
+			__func__,
+			work_buf_width, work_buf_height,
+			work_buf_count, node_count);
+		return -EINVAL;
+	}
+
+ dst_fmt = req->user_req.dst_img.fmt;
+
+ is_src_fill = (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0;
+
+ yuv_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+ yuv_semi_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ *node_count = 0;
+ *work_buf_width = 0;
+ *work_buf_height = 0;
+ *work_buf_count = 0;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ n_nodes++;
+ n_work_bufs++;
+ }
+
+ if ((yuv_planar_dst || yuv_semi_planar_dst) &&
+ (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW)) {
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid combination: source_fill_raw"
+ " and multi-buffer destination.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) != 0 &&
+ (req->user_req.flags & B2R2_BLT_FLAG_DEST_COLOR_KEY)) {
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid combination: source and "
+ "destination color keying.\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_COLOR_KEY |
+ B2R2_BLT_FLAG_DEST_COLOR_KEY))) {
+ b2r2_log_warn(cont->dev, "%s: Invalid combination: "
+ "source_fill and color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags &
+ (B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND |
+ B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND)) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
+ b2r2_log_warn(cont->dev, "%s: Invalid combination: "
+ "blending and color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
+		b2r2_log_warn(cont->dev, "%s: Invalid combination: source mask "
+			"and color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_MASK)) {
+		b2r2_log_warn(cont->dev, "%s: Unsupported: source mask or "
+			"destination color keying.\n",
+ __func__);
+ return -ENOSYS;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK)) {
+ enum b2r2_blt_fmt src_fmt = req->user_req.src_img.fmt;
+ bool yuv_src =
+ src_fmt == B2R2_BLT_FMT_Y_CB_Y_CR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ if (yuv_src || src_fmt == B2R2_BLT_FMT_1_BIT_A1 ||
+ src_fmt == B2R2_BLT_FMT_8_BIT_A8) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source "
+				"mask with YUV or pure alpha "
+ "formats.\n", __func__);
+ return -ENOSYS;
+ }
+ }
+
+ /* Check for invalid dimensions that would hinder scale calculations */
+ src_rect = req->user_req.src_rect;
+ dst_rect = req->user_req.dst_rect;
+ /* Check for invalid src_rect unless src_fill is enabled */
+ if (!is_src_fill && (src_rect.x < 0 || src_rect.y < 0 ||
+ src_rect.x + src_rect.width > req->user_req.src_img.width ||
+ src_rect.y + src_rect.height > req->user_req.src_img.height)) {
+ b2r2_log_warn(cont->dev, "%s: src_rect outside src_img:\n"
+ "src(x,y,w,h)=(%d, %d, %d, %d) "
+ "src_img(w,h)=(%d, %d).\n",
+ __func__,
+ src_rect.x, src_rect.y, src_rect.width, src_rect.height,
+ req->user_req.src_img.width,
+ req->user_req.src_img.height);
+ return -EINVAL;
+ }
+
+ if (!is_src_fill && (src_rect.width <= 0 || src_rect.height <= 0)) {
+ b2r2_log_warn(cont->dev, "%s: Invalid source dimensions:\n"
+ "src(w,h)=(%d, %d).\n",
+ __func__,
+ src_rect.width, src_rect.height);
+ return -EINVAL;
+ }
+
+ if (dst_rect.width <= 0 || dst_rect.height <= 0) {
+ b2r2_log_warn(cont->dev, "%s: Invalid dest dimensions:\n"
+ "dst(w,h)=(%d, %d).\n",
+ __func__,
+ dst_rect.width, dst_rect.height);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ req->user_req.clut == NULL) {
+ b2r2_log_warn(cont->dev, "%s: Invalid request: no table "
+ "specified for CLUT color correction.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check for invalid image params */
+ if (!is_src_fill && validate_buf(cont, &(req->user_req.src_img),
+ &(req->src_resolved)))
+ return -EINVAL;
+
+ if (validate_buf(cont, &(req->user_req.dst_img), &(req->dst_resolved)))
+ return -EINVAL;
+
+ if (is_src_fill) {
+ /*
+ * Params correct for a source fill operation.
+ * No need for further checking.
+ */
+ if (yuv_planar_dst)
+ n_nodes += 2;
+ else if (yuv_semi_planar_dst)
+ n_nodes++;
+
+ *work_buf_width = B2R2_GENERIC_WORK_BUF_WIDTH;
+ *work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT;
+ *work_buf_count = n_work_bufs;
+ *node_count = n_nodes;
+ b2r2_log_info(cont->dev, "%s DONE buf_w=%d buf_h=%d "
+ "buf_count=%d node_count=%d\n", __func__,
+ *work_buf_width, *work_buf_height,
+ *work_buf_count, *node_count);
+ return 0;
+ }
+
+ /*
+ * Calculate scaling factors, all transform enum values
+ * that include rotation have the CCW_ROT_90 bit set.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect.width << 10) / dst_rect.height;
+ v_scf = (src_rect.height << 10) / dst_rect.width;
+ } else {
+ h_scf = (src_rect.width << 10) / dst_rect.width;
+ v_scf = (src_rect.height << 10) / dst_rect.height;
+ }
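+	/*
+	 * Example: src_rect.width == 100 and dst_rect.width == 50 gives
+	 * h_scf == (100 << 10) / 50 == 2048 == 2.0 in 6.10 fixed point,
+	 * i.e. a 2x downscale.
+	 */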
+
+ /* Check for degenerate/out_of_range scaling factors. */
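+	/* 0x7C00 == 31744 == 31.0 in 6.10 fixed point: cap on downscaling. */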
+ if (h_scf <= 0 || v_scf <= 0 || h_scf > 0x7C00 || v_scf > 0x7C00) {
+ b2r2_log_warn(cont->dev,
+ "%s: Dimensions result in degenerate or "
+ "out of range scaling:\n"
+ "src(w,h)=(%d, %d) "
+ "dst(w,h)=(%d,%d).\n"
+ "h_scf=0x%.8x, v_scf=0x%.8x\n",
+ __func__,
+ src_rect.width, src_rect.height,
+ dst_rect.width, dst_rect.height,
+ h_scf, v_scf);
+ return -EINVAL;
+ }
+
+ if (yuv_planar_dst)
+ n_nodes += 2;
+ else if (yuv_semi_planar_dst)
+ n_nodes++;
+
+ *work_buf_width = B2R2_GENERIC_WORK_BUF_WIDTH;
+ *work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT;
+ *work_buf_count = n_work_bufs;
+ *node_count = n_nodes;
+ b2r2_log_info(cont->dev, "%s DONE buf_w=%d buf_h=%d buf_count=%d "
+ "node_count=%d\n", __func__, *work_buf_width,
+ *work_buf_height, *work_buf_count, *node_count);
+ return 0;
+}
+
+int b2r2_generic_configure(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_work_buf *tmp_bufs,
+ u32 buf_count)
+{
+ struct b2r2_node *node = NULL;
+ struct b2r2_work_buf *in_buf = NULL;
+ struct b2r2_work_buf *out_buf = NULL;
+ struct b2r2_work_buf *empty_buf = NULL;
+ struct b2r2_control *cont = req->instance->control;
+
+#ifdef B2R2_GENERIC_DEBUG
+ u32 needed_bufs = 0;
+ u32 needed_nodes = 0;
+ s32 work_buf_width = 0;
+ s32 work_buf_height = 0;
+ u32 n_nodes = 0;
+ int invalid_req = b2r2_generic_analyze(req, &work_buf_width,
+ &work_buf_height, &needed_bufs,
+ &needed_nodes);
+ if (invalid_req < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid request supplied, ec=%d\n",
+ __func__, invalid_req);
+ return -EINVAL;
+ }
+
+ node = first;
+
+ while (node != NULL) {
+ n_nodes++;
+ node = node->next;
+ }
+ if (n_nodes < needed_nodes) {
+ b2r2_log_warn(cont->dev, "%s: Not enough nodes %d < %d.\n",
+ __func__, n_nodes, needed_nodes);
+ return -EINVAL;
+ }
+
+ if (buf_count < needed_bufs) {
+ b2r2_log_warn(cont->dev, "%s: Not enough buffers %d < %d.\n",
+ __func__, buf_count, needed_bufs);
+ return -EINVAL;
+ }
+
+#endif
+
+ reset_nodes(cont, first);
+ node = first;
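+	/*
+	 * The stages ping-pong through the work buffers: each stage reads
+	 * in_buf and writes out_buf, which becomes the next stage's in_buf
+	 * while a fresh buffer is drawn from empty_buf.
+	 */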
+ empty_buf = tmp_bufs;
+ out_buf = empty_buf;
+ empty_buf++;
+ /* Prepare input tile. Color_fill or read from src */
+ setup_input_stage(req, node, out_buf);
+ in_buf = out_buf;
+ out_buf = empty_buf;
+ empty_buf++;
+ node = node->next;
+
+ if ((req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0) {
+ setup_transform_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ out_buf = empty_buf++;
+ }
+ /* EMACSOC TODO: mask */
+ /*
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) {
+ setup_mask_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ out_buf = empty_buf++;
+ }
+ */
+ /* Read the part of destination that will be updated */
+ setup_dst_read_stage(req, node, out_buf);
+ node = node->next;
+ setup_blend_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ setup_writeback_stage(req, node, in_buf);
+ return 0;
+}
+
+void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_blt_rect *dst_rect_area)
+{
+ /*
+ * Nodes come in the following order: <input stage>, [transform],
+ * [src_mask], <dst_read>, <blend>, <writeback>
+ */
+ struct b2r2_node *node = first;
+ const struct b2r2_blt_rect *dst_rect = &(req->user_req.dst_rect);
+ const struct b2r2_blt_rect *src_rect = &(req->user_req.src_rect);
+ const enum b2r2_blt_fmt src_fmt = req->user_req.src_img.fmt;
+ bool yuv_multi_buffer_src =
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ const enum b2r2_blt_fmt dst_fmt = req->user_req.dst_img.fmt;
+ const bool yuv_multi_buffer_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ s32 src_x = 0;
+ s32 src_y = 0;
+ s32 src_w = 0;
+ s32 src_h = 0;
+ u32 b2r2_rzi = 0;
+ s32 clip_top = 0;
+ s32 clip_left = 0;
+ s32 clip_bottom = req->user_req.dst_img.height - 1;
+ s32 clip_right = req->user_req.dst_img.width - 1;
+ /* Dst coords inside the dst_rect, not the buffer */
+ s32 dst_x = dst_rect_area->x;
+ s32 dst_y = dst_rect_area->y;
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect->width << 10) / dst_rect->height;
+ v_scf = (src_rect->height << 10) / dst_rect->width;
+ } else {
+ h_scf = (src_rect->width << 10) / dst_rect->width;
+ v_scf = (src_rect->height << 10) / dst_rect->height;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * Normally the inverse transform for 90 degree rotation
+ * is given by:
+ * | 0 1| |x| | y|
+ * | | X | | = | |
+ * |-1 0| |y| |-x|
+ * but screen coordinates are flipped in y direction
+ * (compared to usual Cartesian coordinates), hence the offsets.
+ */
+ src_x = (dst_rect->height - dst_y - dst_rect_area->height) *
+ h_scf;
+ src_y = dst_x * v_scf;
+ src_w = dst_rect_area->height * h_scf;
+ src_h = dst_rect_area->width * v_scf;
+ } else {
+ src_x = dst_x * h_scf;
+ src_y = dst_y * v_scf;
+ src_w = dst_rect_area->width * h_scf;
+ src_h = dst_rect_area->height * v_scf;
+ }
+
+ b2r2_rzi |= ((src_x & 0x3ff) << B2R2_RZI_HSRC_INIT_SHIFT) |
+ ((src_y & 0x3ff) << B2R2_RZI_VSRC_INIT_SHIFT);
+
+ /*
+ * src_w must contain all the pixels that contribute
+ * to a particular tile.
+ * ((x + 0x3ff) >> 10) is equivalent to ceiling(x),
+ * expressed in 6.10 fixed point format.
+	 * Every destination tile maps to a certain area in the source
+ * rectangle. The area in source will most likely not be a rectangle
+ * with exact integer dimensions whenever arbitrary scaling is involved.
+ * Consider the following example.
+ * Suppose, that width of the current destination tile maps
+ * to 1.7 pixels in source, starting at x == 5.4, as calculated
+ * using the scaling factor.
+ * This means that while the destination tile is written,
+ * the source should be read from x == 5.4 up to x == 5.4 + 1.7 == 7.1
+ * Consequently, color from 3 pixels (x == 5, 6 and 7)
+ * needs to be read from source.
+ * The formula below the comment yields:
+ * ceil(0.4 + 1.7) == ceil(2.1) == 3
+ * (src_x & 0x3ff) is the fractional part of src_x,
+ * which is expressed in 6.10 fixed point format.
+ * Thus, width of the source area should be 3 pixels wide,
+ * starting at x == 5.
+ * However, the reading should not start at x == 5.0
+ * but a bit inside, namely x == 5.4
+ * The B2R2_RZI register is used to instruct the HW to do so.
+ * It contains the fractional part that will be added to
+ * the first pixel coordinate, before incrementing the current source
+ * coordinate with the step specified in B2R2_RSF register.
+ * The same applies to scaling in vertical direction.
+ */
+ src_w = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;
+ src_h = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10;
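+	/*
+	 * Worked example: src_x == 5.4 corresponds to 5530 in 6.10 fixed
+	 * point, so (src_x & 0x3ff) == 410 (the 0.4 fraction). With
+	 * src_w == 1.7 == 1741, (410 + 1741 + 1023) >> 10 == 3174 >> 10
+	 * == 3 pixels, matching ceil(2.1) from the comment above.
+	 */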
+
+ /*
+ * EMACSOC TODO: Remove this debug clamp, once tile size
+ * is taken into account in generic_analyze()
+ */
+ if (src_w > 128)
+ src_w = 128;
+
+ src_x >>= 10;
+ src_y >>= 10;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ src_x = src_rect->width - src_x - src_w;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ src_y = src_rect->height - src_y - src_h;
+
+ /*
+ * Translate the src/dst_rect coordinates into true
+ * src/dst_buffer coordinates
+ */
+ src_x += src_rect->x;
+ src_y += src_rect->y;
+
+ dst_x += dst_rect->x;
+ dst_y += dst_rect->y;
+
+ /*
+ * Clamp the src coords to buffer dimensions
+ * to prevent illegal reads.
+ */
+ if (src_x < 0)
+ src_x = 0;
+
+ if (src_y < 0)
+ src_y = 0;
+
+ if ((src_x + src_w) > req->user_req.src_img.width)
+ src_w = req->user_req.src_img.width - src_x;
+
+ if ((src_y + src_h) > req->user_req.src_img.height)
+ src_h = req->user_req.src_img.height - src_y;
+
+ /* The input node */
+ if (yuv_multi_buffer_src) {
+ /* Luma on SRC3 */
+ node->node.GROUP5.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP5.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP10.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP10.B2R2_RZI |= b2r2_rzi;
+
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ switch (src_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Chroma is half the size of luma. Must round up
+ * the chroma size to handle cases when luma size is not
+ * divisible by 2.
+ * E.g. luma width==7 requires chroma width==4.
+ * Chroma width==7/2==3 is only enough
+ * for luma width==6.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((src_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ (((src_y & 0xffff) >> 1) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((src_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((src_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ if (src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
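+			/*
+			 * Chroma is sampled at half the luma rate, so its
+			 * fractional start offsets are half the luma ones;
+			 * hence b2r2_rzi >> 1, masked to the INIT fields.
+			 */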
+ node->node.GROUP9.B2R2_RZI |= (b2r2_rzi >> 1) &
+ ((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Now chroma is half the size of luma
+ * only in horizontal direction.
+ * Same rounding applies as for 420 formats above,
+ * except it is only done horizontally.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((src_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((src_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ if (src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ node->node.GROUP9.B2R2_RZI |=
+ (((src_x & 0x3ff) >> 1) <<
+ B2R2_RZI_HSRC_INIT_SHIFT) |
+ ((src_y & 0x3ff) << B2R2_RZI_VSRC_INIT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /*
+ * Chroma goes on SRC2 and SRC1.
+ * It is the same size as luma.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP3.B2R2_SXY = node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ = node->node.GROUP4.B2R2_SSZ;
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP9.B2R2_RZI |= b2r2_rzi;
+ break;
+ default:
+ break;
+ }
+ } else {
+ node->node.GROUP4.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP9.B2R2_RZI |= b2r2_rzi;
+ }
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * dst_rect_area coordinates are specified
+ * after potential rotation.
+ * Input is read before rotation, hence the width and height
+ * need to be swapped.
+ * Horizontal and vertical flips are accomplished with
+ * suitable scanning order while writing
+ * to the temporary buffer.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_X_SHIFT;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->width - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+ }
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ } else {
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->width - 1) & 0xffff) <<
+ B2R2_XY_X_SHIFT;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+ }
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ }
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW)) {
+ /*
+ * Scan order for source fill should always be left-to-right
+ * and top-to-bottom. Fill the input tile from top left.
+ */
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP4.B2R2_SSZ = node->node.GROUP1.B2R2_TSZ;
+ }
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Input node done.\n", __func__);
+ }
+
+ /* Transform */
+ if ((req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0) {
+ /*
+ * Transform node operates on temporary buffers.
+ * Content always at top left, but scanning order
+ * has to be flipped during rotation.
+ * Width and height need to be considered as well, since
+ * a tile may not necessarily be filled completely.
+ * dst_rect_area dimensions are specified
+ * after potential rotation.
+ * Input is read before rotation, hence the width and height
+ * need to be swapped on src.
+ */
+ node = node->next;
+
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ /* Bottom line written first */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+				"%s Transform node done.\n", __func__);
+ }
+ }
+
+ /* Source mask */
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) {
+ node = node->next;
+ /*
+ * Same coords for mask as for the input stage.
+ * Should the mask be transformed together with source?
+ * EMACSOC TODO: Apply mask before any
+ * transform/scaling is done.
+ * Otherwise it will be dst_ not src_mask.
+ */
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+ "%s Source mask node done.\n", __func__);
+ }
+ }
+
+ /* dst_read */
+ if (yuv_multi_buffer_dst) {
+ s32 dst_w = dst_rect_area->width;
+ s32 dst_h = dst_rect_area->height;
+ bool yuv420_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv422_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ node = node->next;
+ /* Luma on SRC3 */
+ node->node.GROUP5.B2R2_SXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP5.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (yuv420_dst) {
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Chroma is half the size of luma. Must round up
+ * the chroma size to handle cases when luma size is not
+ * divisible by 2.
+ * E.g. luma width==7 requires chroma width==4.
+ * Chroma width==7/2==3 is only enough
+ * for luma width==6.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((dst_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ (((dst_y & 0xffff) >> 1) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((dst_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ } else if (yuv422_dst) {
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Now chroma is half the size of luma
+ * only in horizontal direction.
+ * Same rounding applies as for 420 formats above,
+ * except it is only done horizontally.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((dst_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ } else if (dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ /*
+ * Chroma goes on SRC2 and SRC1.
+ * It is the same size as luma.
+ */
+ node->node.GROUP4.B2R2_SXY = node->node.GROUP5.B2R2_SXY;
+ node->node.GROUP4.B2R2_SSZ = node->node.GROUP5.B2R2_SSZ;
+ node->node.GROUP3.B2R2_SXY = node->node.GROUP5.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ = node->node.GROUP5.B2R2_SSZ;
+ }
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ } else {
+ node = node->next;
+ node->node.GROUP4.B2R2_SXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ }
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s dst_read node done.\n", __func__);
+ }
+
+ /* blend */
+ node = node->next;
+ node->node.GROUP3.B2R2_SXY = 0;
+ node->node.GROUP3.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ /* contents of the foreground temporary buffer always at top left */
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Blend node done.\n", __func__);
+ }
+
+ /* writeback */
+ node = node->next;
+ if ((req->user_req.flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0) {
+ clip_left = req->user_req.dst_clip_rect.x;
+ clip_top = req->user_req.dst_clip_rect.y;
+ clip_right = clip_left + req->user_req.dst_clip_rect.width - 1;
+ clip_bottom = clip_top + req->user_req.dst_clip_rect.height - 1;
+ }
+ /*
+ * Clamp the dst clip rectangle to buffer dimensions to prevent
+	 * illegal writes. An illegal clip rectangle, e.g. one outside the
+	 * buffer, will be ignored, resulting in nothing being clipped.
+ */
+ if (clip_left < 0 || req->user_req.dst_img.width <= clip_left)
+ clip_left = 0;
+
+ if (clip_top < 0 || req->user_req.dst_img.height <= clip_top)
+ clip_top = 0;
+
+ if (clip_right < 0 || req->user_req.dst_img.width <= clip_right)
+ clip_right = req->user_req.dst_img.width - 1;
+
+ if (clip_bottom < 0 || req->user_req.dst_img.height <= clip_bottom)
+ clip_bottom = req->user_req.dst_img.height - 1;
+
+ /*
+ * Only allow writing inside the clip rect.
+ * INTNL bit in B2R2_CWO should be zero.
+ */
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
+
+ if (yuv_multi_buffer_dst) {
+ const s32 dst_w = dst_rect_area->width;
+ const s32 dst_h = dst_rect_area->height;
+ int i = 0;
+ /* Number of nodes required to write chroma output */
+ int n_nodes = 1;
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR)
+ n_nodes = 2;
+
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+ "%s Writeback luma node done.\n", __func__);
+ }
+
+ node = node->next;
+
+ /*
+ * Chroma components. 1 or 2 nodes
+ * for semi-planar or planar buffer respectively.
+ */
+ for (i = 0; i < n_nodes && node != NULL; ++i) {
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma is half the size of luma.
+ * Must round up the chroma size to handle
+ * cases when luma size is not divisible by 2.
+ * E.g. luma_width==7 requires chroma_width==4.
+ * Chroma_width==7/2==3 is only enough
+ * for luma_width==6.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ (((dst_x & 0xffff) >> 1) <<
+ B2R2_XY_X_SHIFT) |
+ (((dst_y & 0xffff) >> 1) <<
+ B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((dst_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+			 * Chroma is half the size of luma in the
+			 * horizontal direction only. The same rounding
+			 * applies as for the 420 formats above, except
+			 * that it is done only horizontally.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ (((dst_x & 0xffff) >> 1) <<
+ B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /*
+ * Chroma has the same resolution as luma.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Writeback chroma "
+ "node %d of %d done.\n",
+ __func__, i + 1, n_nodes);
+ }
+
+ node = node->next;
+ }
+ } else {
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Writeback node done.\n",
+ __func__);
+ }
+ }
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
diff --git a/drivers/video/b2r2/b2r2_generic.h b/drivers/video/b2r2/b2r2_generic.h
new file mode 100644
index 00000000000..3b22f654deb
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_generic.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 generic path. Full coverage of the user interface,
+ * but a non-optimized implementation, intended as a fallback.
+ *
+ * Author: Maciej Socha <maciej.socha@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_GENERIC_H
+#define _LINUX_VIDEO_B2R2_GENERIC_H
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_internal.h"
+
+/**
+ * b2r2_generic_init()
+ */
+void b2r2_generic_init(struct b2r2_control *cont);
+
+/**
+ * b2r2_generic_exit()
+ */
+void b2r2_generic_exit(struct b2r2_control *cont);
+
+/**
+ * b2r2_generic_analyze()
+ */
+int b2r2_generic_analyze(const struct b2r2_blt_request *req,
+ s32 *work_buf_width,
+ s32 *work_buf_height,
+ u32 *work_buf_count,
+ u32 *node_count);
+/**
+ * b2r2_generic_configure()
+ */
+int b2r2_generic_configure(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_work_buf *tmp_bufs,
+ u32 buf_count);
+/**
+ * b2r2_generic_set_areas()
+ */
+void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_blt_rect *dst_rect_area);
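+
+/*
+ * Typical call sequence for the generic path (illustrative, assuming a
+ * prepared request and allocated nodes and work buffers):
+ *
+ *	b2r2_generic_analyze(req, &width, &height, &buf_count, &node_count);
+ *	b2r2_generic_configure(req, first, tmp_bufs, buf_count);
+ *	b2r2_generic_set_areas(req, first, &dst_rect_area);
+ */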
+#endif
diff --git a/drivers/video/b2r2/b2r2_global.h b/drivers/video/b2r2/b2r2_global.h
new file mode 100644
index 00000000000..38cf74bb753
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_global.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 global definitions
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_GLOBAL_H
+#define __B2R2_GLOBAL_H
+
+/** Sources involved */
+
+struct b2r2_system {
+ unsigned int B2R2_NIP;
+ unsigned int B2R2_CIC;
+ unsigned int B2R2_INS;
+ unsigned int B2R2_ACK;
+};
+
+struct b2r2_target {
+ unsigned int B2R2_TBA;
+ unsigned int B2R2_TTY;
+ unsigned int B2R2_TXY;
+ unsigned int B2R2_TSZ;
+};
+
+struct b2r2_color_fill {
+ unsigned int B2R2_S1CF;
+ unsigned int B2R2_S2CF;
+};
+
+struct b2r2_src_config {
+ unsigned int B2R2_SBA;
+ unsigned int B2R2_STY;
+ unsigned int B2R2_SXY;
+ unsigned int B2R2_SSZ;
+};
+
+struct b2r2_clip {
+ unsigned int B2R2_CWO;
+ unsigned int B2R2_CWS;
+};
+
+struct b2r2_color_key {
+ unsigned int B2R2_KEY1;
+ unsigned int B2R2_KEY2;
+};
+
+struct b2r2_clut {
+ unsigned int B2R2_CCO;
+ unsigned int B2R2_CML;
+};
+
+struct b2r2_rsz_pl_mask {
+ unsigned int B2R2_FCTL;
+ unsigned int B2R2_PMK;
+};
+
+struct b2r2_Cr_luma_rsz {
+ unsigned int B2R2_RSF;
+ unsigned int B2R2_RZI;
+ unsigned int B2R2_HFP;
+ unsigned int B2R2_VFP;
+};
+
+struct b2r2_flikr_filter {
+ unsigned int B2R2_FF0;
+ unsigned int B2R2_FF1;
+ unsigned int B2R2_FF2;
+ unsigned int B2R2_FF3;
+};
+
+struct b2r2_xyl {
+ unsigned int B2R2_XYL;
+ unsigned int B2R2_XYP;
+};
+
+struct b2r2_sau {
+ unsigned int B2R2_SAR;
+ unsigned int B2R2_USR;
+};
+
+struct b2r2_vm {
+ unsigned int B2R2_VMX0;
+ unsigned int B2R2_VMX1;
+ unsigned int B2R2_VMX2;
+ unsigned int B2R2_VMX3;
+};
+
+struct b2r2_link_list {
+
+ struct b2r2_system GROUP0;
+ struct b2r2_target GROUP1;
+ struct b2r2_color_fill GROUP2;
+ struct b2r2_src_config GROUP3;
+ struct b2r2_src_config GROUP4;
+ struct b2r2_src_config GROUP5;
+ struct b2r2_clip GROUP6;
+ struct b2r2_clut GROUP7;
+ struct b2r2_rsz_pl_mask GROUP8;
+ struct b2r2_Cr_luma_rsz GROUP9;
+ struct b2r2_Cr_luma_rsz GROUP10;
+ struct b2r2_flikr_filter GROUP11;
+ struct b2r2_color_key GROUP12;
+ struct b2r2_xyl GROUP13;
+ struct b2r2_sau GROUP14;
+ struct b2r2_vm GROUP15;
+ struct b2r2_vm GROUP16;
+
+ unsigned int B2R2_RESERVED[2];
+};
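+
+/*
+ * Note (editorial): each GROUPn member above mirrors one parameter
+ * group of a B2R2 linked-list node; the node setup code addresses
+ * registers through these names, e.g. node->node.GROUP6.B2R2_CWO for
+ * the clip window offset.
+ */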
+
+
+#endif /* !defined(__B2R2_GLOBAL_H) */
diff --git a/drivers/video/b2r2/b2r2_hw.h b/drivers/video/b2r2/b2r2_hw.h
new file mode 100644
index 00000000000..9739912d78c
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_hw.h
@@ -0,0 +1,458 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 hw definitions
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef B2R2_HW_H__
+#define B2R2_HW_H__
+
+#include <linux/bitops.h>
+
+/* Scaling works in strips 128 pixels wide */
+#define B2R2_RESCALE_MAX_WIDTH 128
+
+/* Rotation works in strips 16 pixels wide */
+#define B2R2_ROTATE_MAX_WIDTH 16
+
+/* B2R2 color formats */
+#define B2R2_COLOR_FORMAT_SHIFT 16
+enum b2r2_native_fmt {
+ /* RGB formats */
+ B2R2_NATIVE_RGB565 = 0x00 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_RGB888 = 0x01 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB8565 = 0x04 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB8888 = 0x05 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB1555 = 0x06 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB4444 = 0x07 << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* YCbCr formats */
+ B2R2_NATIVE_YCBCR888 = 0x10 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR422R = 0x12 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_AYCBCR8888 = 0x15 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_MB = 0x14 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_R2B = 0x16 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_MBN = 0x0e << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* CLUT formats */
+ B2R2_NATIVE_CLUT2 = 0x09 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_CLUT8 = 0x0b << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ACLUT44 = 0x0c << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ACLUT88 = 0x0d << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* Misc. formats */
+ B2R2_NATIVE_A1 = 0x18 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_A8 = 0x19 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YUV = 0x1e << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_BYTE = 0x1f << B2R2_COLOR_FORMAT_SHIFT,
+};
+
+/* B2R2_CIC register values */
+enum b2r2_cic {
+ B2R2_CIC_COLOR_FILL = BIT(1),/*0x00000002*/
+ B2R2_CIC_SOURCE_1 = BIT(2),/*0x00000004*/
+ B2R2_CIC_SOURCE_2 = BIT(3),/*0x00000008*/
+ B2R2_CIC_SOURCE_3 = BIT(4),/*0x00000010*/
+ B2R2_CIC_CLIP_WINDOW = BIT(5),/*0x00000020*/
+ B2R2_CIC_CLUT = BIT(6),/*0x00000040*/
+ B2R2_CIC_FILTER_CONTROL = BIT(7),/*0x00000080*/
+ B2R2_CIC_RESIZE_CHROMA = BIT(8),/*0x00000100*/
+ B2R2_CIC_RESIZE_LUMA = BIT(9),/*0x00000200*/
+ B2R2_CIC_FLICKER_COEFF = BIT(10),/*0x00000400*/
+ B2R2_CIC_COLOR_KEY = BIT(11),/*0x00000800*/
+ B2R2_CIC_XYL = BIT(12),/*0x00001000*/
+ B2R2_CIC_SAU = BIT(13),/*0x00002000*/
+ B2R2_CIC_IVMX = BIT(14),/*0x00004000*/
+ B2R2_CIC_OVMX = BIT(15),/*0x00008000*/
+ B2R2_CIC_PACEDOT = BIT(16),/*0x00010000*/
+ B2R2_CIC_VC1 = BIT(17)/*0x00020000*/
+};
+
+/* B2R2_INS register values */
+#define B2R2_INS_SOURCE_1_SHIFT 0
+#define B2R2_INS_SOURCE_2_SHIFT 3
+#define B2R2_INS_SOURCE_3_SHIFT 5
+#define B2R2_INS_IVMX_SHIFT 6
+#define B2R2_INS_CLUTOP_SHIFT 7
+#define B2R2_INS_RESCALE2D_SHIFT 8
+#define B2R2_INS_FLICK_FILT_SHIFT 9
+#define B2R2_INS_RECT_CLIP_SHIFT 10
+#define B2R2_INS_CKEY_SHIFT 11
+#define B2R2_INS_OVMX_SHIFT 12
+#define B2R2_INS_DEI_SHIFT 13
+#define B2R2_INS_PLANE_MASK_SHIFT 14
+#define B2R2_INS_XYL_SHIFT 15
+#define B2R2_INS_DOT_SHIFT 16
+#define B2R2_INS_VC1R_SHIFT 17
+#define B2R2_INS_ROTATION_SHIFT 18
+#define B2R2_INS_PACE_DOWN_SHIFT 30
+#define B2R2_INS_BLITCOMPIRQ_SHIFT 31
+enum b2r2_ins {
+ /* Source 1 config */
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_COLOR_FILL_REGISTER = 0x3 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_DIRECT_COPY = 0x4 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_DIRECT_FILL = 0x7 << B2R2_INS_SOURCE_1_SHIFT,
+
+ /* Source 2 config */
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_2_SHIFT,
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER = 0x3 << B2R2_INS_SOURCE_2_SHIFT,
+
+ /* Source 3 config */
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_3_SHIFT,
+
+ /* Other configs */
+ B2R2_INS_IVMX_ENABLED = 0x1 << B2R2_INS_IVMX_SHIFT,
+ B2R2_INS_CLUTOP_ENABLED = 0x1 << B2R2_INS_CLUTOP_SHIFT,
+ B2R2_INS_RESCALE2D_ENABLED = 0x1 << B2R2_INS_RESCALE2D_SHIFT,
+ B2R2_INS_FLICK_FILT_ENABLED = 0x1 << B2R2_INS_FLICK_FILT_SHIFT,
+ B2R2_INS_RECT_CLIP_ENABLED = 0x1 << B2R2_INS_RECT_CLIP_SHIFT,
+ B2R2_INS_CKEY_ENABLED = 0x1 << B2R2_INS_CKEY_SHIFT,
+ B2R2_INS_OVMX_ENABLED = 0x1 << B2R2_INS_OVMX_SHIFT,
+ B2R2_INS_DEI_ENABLED = 0x1 << B2R2_INS_DEI_SHIFT,
+ B2R2_INS_PLANE_MASK_ENABLED = 0x1 << B2R2_INS_PLANE_MASK_SHIFT,
+ B2R2_INS_XYL_ENABLED = 0x1 << B2R2_INS_XYL_SHIFT,
+ B2R2_INS_DOT_ENABLED = 0x1 << B2R2_INS_DOT_SHIFT,
+ B2R2_INS_VC1R_ENABLED = 0x1 << B2R2_INS_VC1R_SHIFT,
+ B2R2_INS_ROTATION_ENABLED = 0x1 << B2R2_INS_ROTATION_SHIFT,
+ B2R2_INS_PACE_DOWN_ENABLED = 0x1 << B2R2_INS_PACE_DOWN_SHIFT,
+ B2R2_INS_BLITCOMPIRQ_ENABLED = 0x1 << B2R2_INS_BLITCOMPIRQ_SHIFT,
+
+};
+
+/* B2R2_ACK register values */
+#define B2R2_ACK_MODE_SHIFT 0
+#define B2R2_ACK_SWAP_FG_BG_SHIFT 4
+#define B2R2_ACK_GALPHA_ROPID_SHIFT 8
+#define B2R2_ACK_CKEY_BLUE_SHIFT 16
+#define B2R2_ACK_CKEY_GREEN_SHIFT 18
+#define B2R2_ACK_CKEY_RED_SHIFT 20
+#define B2R2_ACK_CKEY_SEL_SHIFT 22
+enum b2r2_ack {
+ /* ALU operation modes */
+ B2R2_ACK_MODE_LOGICAL_OPERATION = 0x1 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BLEND_NOT_PREMULT = 0x2 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BLEND_PREMULT = 0x3 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_LOGICAL_FIRST_PASS = 0x4 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_BLEND = 0x5 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BYPASS_S2_S3 = 0x7 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_LOGICAL_SECOND_PASS = 0x8 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_LOGICAL = 0x9 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_BLEND_NOT_PREMULT =
+ 0xa << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_BLEND_PREMULT = 0xb << B2R2_ACK_MODE_SHIFT,
+
+ /* ALU channel selection */
+ B2R2_ACK_SWAP_FG_BG = 0x1 << B2R2_ACK_SWAP_FG_BG_SHIFT,
+
+ /* Global alpha and ROP IDs */
+ B2R2_ACK_ROP_CLEAR = 0x0 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND = 0x1 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND_REV = 0x2 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_COPY = 0x3 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND_INV = 0x4 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NOOP = 0x5 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_XOR = 0x6 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR = 0x7 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NOR = 0x8 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_EQUIV = 0x9 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_INVERT = 0xa << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR_REV = 0xb << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_COPY_INV = 0xc << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR_INV = 0xd << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NAND = 0xe << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_SET = 0xf << B2R2_ACK_GALPHA_ROPID_SHIFT,
+
+ /* Color key configuration bits */
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_BLUE_SHIFT,
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_BLUE_SHIFT,
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_GREEN_SHIFT,
+ B2R2_ACK_CKEY_RED_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_GREEN_SHIFT,
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_RED_SHIFT,
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_RED_SHIFT,
+
+ /* Color key input selection */
+ B2R2_ACK_CKEY_SEL_DEST = 0x0 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_SRC_BEFORE_CLUT = 0x1 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT = 0x2 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_BLANKING_S2_ALPHA = 0x3 << B2R2_ACK_CKEY_SEL_SHIFT,
+};
+
+/* Common <S/T>TY defines */
+#define B2R2_TY_BITMAP_PITCH_SHIFT 0
+#define B2R2_TY_COLOR_FORM_SHIFT 16
+#define B2R2_TY_ALPHA_RANGE_SHIFT 21
+#define B2R2_TY_MB_ACCESS_MODE_SHIFT 23
+#define B2R2_TY_HSO_SHIFT 24
+#define B2R2_TY_VSO_SHIFT 25
+#define B2R2_TY_SUBBYTE_SHIFT 28
+#define B2R2_TY_ENDIAN_SHIFT 30
+#define B2R2_TY_SECURE_SHIFT 31
+
+/* Dummy enum for generalization of <S/T>TY registers */
+enum b2r2_ty {
+ /* Alpha range */
+ B2R2_TY_ALPHA_RANGE_128 = 0x0 << B2R2_TY_ALPHA_RANGE_SHIFT,
+ B2R2_TY_ALPHA_RANGE_255 = 0x1 << B2R2_TY_ALPHA_RANGE_SHIFT,
+
+ /* Access mode in macro-block organized frame buffers */
+ B2R2_TY_MB_ACCESS_MODE_FRAME = 0x0 << B2R2_TY_MB_ACCESS_MODE_SHIFT,
+ B2R2_TY_MB_ACCESS_MODE_FIELD = 0x1 << B2R2_TY_MB_ACCESS_MODE_SHIFT,
+
+ /* Horizontal scan order */
+ B2R2_TY_HSO_LEFT_TO_RIGHT = 0x0 << B2R2_TY_HSO_SHIFT,
+ B2R2_TY_HSO_RIGHT_TO_LEFT = 0x1 << B2R2_TY_HSO_SHIFT,
+
+ /* Vertical scan order */
+ B2R2_TY_VSO_TOP_TO_BOTTOM = 0x0 << B2R2_TY_VSO_SHIFT,
+ B2R2_TY_VSO_BOTTOM_TO_TOP = 0x1 << B2R2_TY_VSO_SHIFT,
+
+ /* Pixel ordering for sub-byte formats (position of right-most pixel) */
+ B2R2_TY_SUBBYTE_MSB = 0x0 << B2R2_TY_SUBBYTE_SHIFT,
+ B2R2_TY_SUBBYTE_LSB = 0x1 << B2R2_TY_SUBBYTE_SHIFT,
+
+	/* Bitmap endianness */
+ B2R2_TY_ENDIAN_BIG_NOT_LITTLE = 0x1 << B2R2_TY_ENDIAN_SHIFT,
+
+ /* Secureness of the target memory region */
+ B2R2_TY_SECURE_UNSECURE = 0x0 << B2R2_TY_SECURE_SHIFT,
+ B2R2_TY_SECURE_SECURE = 0x1 << B2R2_TY_SECURE_SHIFT,
+
+ /* Dummy to make sure the data type is large enough */
+ B2R2_TY_DUMMY = 0xffffffff,
+};
+
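+/*
+ * Illustrative example (editorial): a target type word typically
+ * combines the pitch, a native format (whose values are already
+ * shifted to the color format position) and the scan order, e.g.
+ *
+ *	tty = (pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ *		B2R2_NATIVE_ARGB8888 |
+ *		B2R2_TY_ALPHA_RANGE_255 |
+ *		B2R2_TY_HSO_LEFT_TO_RIGHT |
+ *		B2R2_TY_VSO_TOP_TO_BOTTOM;
+ */
+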
+/* B2R2_TTY register values */
+#define B2R2_TTY_CB_NOT_CR_SHIFT 22
+#define B2R2_TTY_RGB_ROUND_SHIFT 26
+#define B2R2_TTY_CHROMA_NOT_LUMA_SHIFT 27
+enum b2r2_tty {
+
+ /* Chroma component selection */
+ B2R2_TTY_CB_NOT_CR = 0x1 << B2R2_TTY_CB_NOT_CR_SHIFT,
+
+ /* RGB rounding mode */
+ B2R2_TTY_RGB_ROUND_NORMAL = 0x0 << B2R2_TTY_RGB_ROUND_SHIFT,
+ B2R2_TTY_RGB_ROUND_DITHER = 0x1 << B2R2_TTY_RGB_ROUND_SHIFT,
+
+	/* Component selection for split frame buffer formats */
+ B2R2_TTY_CHROMA_NOT_LUMA = 0x1 << B2R2_TTY_CHROMA_NOT_LUMA_SHIFT,
+};
+
+/* B2R2_S1TY register values */
+#define B2R2_S1TY_A1_SUBST_SHIFT 22
+#define B2R2_S1TY_ROTATION_SHIFT 27
+#define B2R2_S1TY_RGB_EXPANSION_SHIFT 29
+enum b2r2_s1ty {
+
+ /* Alpha bit substitution mode for ARGB1555 */
+ B2R2_S1TY_A1_SUBST_KEY_MODE = 0x1 << B2R2_S1TY_A1_SUBST_SHIFT,
+
+ /* Input rectangle rotation (NOT YET IMPLEMENTED) */
+ B2R2_S1TY_ENABLE_ROTATION = 0x1 << B2R2_S1TY_ROTATION_SHIFT,
+
+ /* RGB expansion mode */
+ B2R2_S1TY_RGB_EXPANSION_MSB_DUP = 0x0 << B2R2_S1TY_RGB_EXPANSION_SHIFT,
+ B2R2_S1TY_RGB_EXPANSION_LSP_ZERO = 0x1 << B2R2_S1TY_RGB_EXPANSION_SHIFT,
+};
+
+/* B2R2_S2TY register values */
+#define B2R2_S2TY_A1_SUBST_SHIFT 22
+#define B2R2_S2TY_CHROMA_LEFT_SHIFT 26
+#define B2R2_S2TY_RGB_EXPANSION_SHIFT 29
+enum b2r2_s2ty {
+
+ /* Alpha bit substitution mode for ARGB1555 */
+ B2R2_S2TY_A1_SUBST_KEY_MODE = 0x1 << B2R2_S2TY_A1_SUBST_SHIFT,
+
+ /* Chroma left extension */
+ B2R2_S2TY_CHROMA_LEFT_EXT_FOLLOWING_PIXEL = 0x0
+ << B2R2_S2TY_CHROMA_LEFT_SHIFT,
+ B2R2_S2TY_CHROMA_LEFT_EXT_AVERAGE = 0x1 << B2R2_S2TY_CHROMA_LEFT_SHIFT,
+
+ /* RGB expansion mode */
+ B2R2_S2TY_RGB_EXPANSION_MSB_DUP = 0x0 << B2R2_S2TY_RGB_EXPANSION_SHIFT,
+ B2R2_S2TY_RGB_EXPANSION_LSP_ZERO = 0x1 << B2R2_S2TY_RGB_EXPANSION_SHIFT,
+};
+
+/* B2R2_S3TY register values */
+#define B2R2_S3TY_BLANK_ACC_SHIFT 26
+enum b2r2_s3ty {
+	/*
+	 * Enables "blank" access on this source (nothing will be fetched
+	 * from memory)
+	 */
+ B2R2_S3TY_ENABLE_BLANK_ACCESS = 0x1 << B2R2_S3TY_BLANK_ACC_SHIFT,
+};
+
+/* B2R2_<S or T>XY register values */
+#define B2R2_XY_X_SHIFT 0
+#define B2R2_XY_Y_SHIFT 16
+
+/* B2R2_<S or T>SZ register values */
+#define B2R2_SZ_WIDTH_SHIFT 0
+#define B2R2_SZ_HEIGHT_SHIFT 16
+
+/* Clip window offset (top left coordinates) */
+#define B2R2_CWO_X_SHIFT 0
+#define B2R2_CWO_Y_SHIFT 16
+
+/* Clip window stop (bottom right coordinates) */
+#define B2R2_CWS_X_SHIFT 0
+#define B2R2_CWS_Y_SHIFT 16
+
+/* Color look-up table */
+enum b2r2_cco {
+ B2R2_CCO_CLUT_COLOR_CORRECTION = (1 << 16),
+ B2R2_CCO_CLUT_UPDATE = (1 << 18),
+ B2R2_CCO_CLUT_ON_S1 = (1 << 15)
+};
+
+/* Filter control (2D resize control) */
+enum b2r2_fctl {
+ /* Horizontal 2D filter mode */
+ B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER = BIT(0),
+ B2R2_FCTL_HF2D_MODE_ENABLE_ALPHA_CHANNEL_FILTER = BIT(1),
+ B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER = BIT(2),
+
+ /* Vertical 2D filter mode */
+ B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER = BIT(4),
+ B2R2_FCTL_VF2D_MODE_ENABLE_ALPHA_CHANNEL_FILTER = BIT(5),
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER = BIT(6),
+
+ /* Alpha borders */
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_RIGHT = BIT(12),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_LEFT = BIT(13),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_BOTTOM = BIT(14),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_TOP = BIT(15),
+
+ /* Luma path horizontal 2D filter mode */
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER = BIT(24),
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER = BIT(25),
+
+ /* Luma path vertical 2D filter mode */
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER = BIT(28),
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER = BIT(29),
+};
+
+/* Resize scaling factor */
+#define B2R2_RSF_HSRC_INC_SHIFT 0
+#define B2R2_RSF_VSRC_INC_SHIFT 16
+
+/* Resizer initialization */
+#define B2R2_RZI_HSRC_INIT_SHIFT 0
+#define B2R2_RZI_HNB_REPEAT_SHIFT 12
+#define B2R2_RZI_VSRC_INIT_SHIFT 16
+#define B2R2_RZI_VNB_REPEAT_SHIFT 28
+
+/* Default values for the resizer */
+#define B2R2_RZI_DEFAULT_HNB_REPEAT (3 << B2R2_RZI_HNB_REPEAT_SHIFT)
+#define B2R2_RZI_DEFAULT_VNB_REPEAT (3 << B2R2_RZI_VNB_REPEAT_SHIFT)
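+
+/*
+ * Note (editorial): the HSRC/VSRC increments are fixed-point source
+ * step values (assuming a 10-bit fractional part); an increment of
+ * 1 << 10 then corresponds to a 1:1 copy, larger values to
+ * downscaling and smaller values to upscaling.
+ */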
+
+
+/* Bus plug configuration registers */
+enum b2r2_plug_opcode_size {
+ B2R2_PLUG_OPCODE_SIZE_8 = 0x3,
+ B2R2_PLUG_OPCODE_SIZE_16 = 0x4,
+ B2R2_PLUG_OPCODE_SIZE_32 = 0x5,
+ B2R2_PLUG_OPCODE_SIZE_64 = 0x6,
+};
+
+enum b2r2_plug_chunk_size {
+ B2R2_PLUG_CHUNK_SIZE_1 = 0x0,
+ B2R2_PLUG_CHUNK_SIZE_2 = 0x1,
+ B2R2_PLUG_CHUNK_SIZE_4 = 0x2,
+ B2R2_PLUG_CHUNK_SIZE_8 = 0x3,
+ B2R2_PLUG_CHUNK_SIZE_16 = 0x4,
+ B2R2_PLUG_CHUNK_SIZE_32 = 0x5,
+ B2R2_PLUG_CHUNK_SIZE_64 = 0x6,
+ B2R2_PLUG_CHUNK_SIZE_128 = 0x7,
+};
+
+enum b2r2_plug_message_size {
+ B2R2_PLUG_MESSAGE_SIZE_1 = 0x0,
+ B2R2_PLUG_MESSAGE_SIZE_2 = 0x1,
+ B2R2_PLUG_MESSAGE_SIZE_4 = 0x2,
+ B2R2_PLUG_MESSAGE_SIZE_8 = 0x3,
+ B2R2_PLUG_MESSAGE_SIZE_16 = 0x4,
+ B2R2_PLUG_MESSAGE_SIZE_32 = 0x5,
+ B2R2_PLUG_MESSAGE_SIZE_64 = 0x6,
+ B2R2_PLUG_MESSAGE_SIZE_128 = 0x7,
+};
+
+enum b2r2_plug_page_size {
+ B2R2_PLUG_PAGE_SIZE_64 = 0x0,
+ B2R2_PLUG_PAGE_SIZE_128 = 0x1,
+ B2R2_PLUG_PAGE_SIZE_256 = 0x2,
+};
+
+/* Default opcode size */
+#if defined(CONFIG_B2R2_OPSIZE_8)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_8
+#elif defined(CONFIG_B2R2_OPSIZE_16)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_16
+#elif defined(CONFIG_B2R2_OPSIZE_32)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_32
+#elif defined(CONFIG_B2R2_OPSIZE_64)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_64
+#else
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT 0
+#endif
+
+/* Default chunk size */
+#if defined(CONFIG_B2R2_CHSIZE_1)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_1
+#elif defined(CONFIG_B2R2_CHSIZE_2)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_2
+#elif defined(CONFIG_B2R2_CHSIZE_4)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_4
+#elif defined(CONFIG_B2R2_CHSIZE_8)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_8
+#elif defined(CONFIG_B2R2_CHSIZE_16)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_16
+#elif defined(CONFIG_B2R2_CHSIZE_32)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_32
+#elif defined(CONFIG_B2R2_CHSIZE_64)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_64
+#elif defined(CONFIG_B2R2_CHSIZE_128)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_128
+#else
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT 0
+#endif
+
+/* Default message size */
+#if defined(CONFIG_B2R2_MGSIZE_1)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_1
+#elif defined(CONFIG_B2R2_MGSIZE_2)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_2
+#elif defined(CONFIG_B2R2_MGSIZE_4)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_4
+#elif defined(CONFIG_B2R2_MGSIZE_8)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_8
+#elif defined(CONFIG_B2R2_MGSIZE_16)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_16
+#elif defined(CONFIG_B2R2_MGSIZE_32)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_32
+#elif defined(CONFIG_B2R2_MGSIZE_64)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_64
+#elif defined(CONFIG_B2R2_MGSIZE_128)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_128
+#else
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT 0
+#endif
+
+/* Default page size */
+#if defined(CONFIG_B2R2_PGSIZE_64)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_64
+#elif defined(CONFIG_B2R2_PGSIZE_128)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_128
+#elif defined(CONFIG_B2R2_PGSIZE_256)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_256
+#else
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT 0
+#endif
+
+#endif /* B2R2_HW_H__ */
diff --git a/drivers/video/b2r2/b2r2_hw_convert.c b/drivers/video/b2r2/b2r2_hw_convert.c
new file mode 100644
index 00000000000..d1b44db79c3
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_hw_convert.c
@@ -0,0 +1,747 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * ST-Ericsson B2R2 node splitter
+ *
+ * Author: Jorgen Nilsson <jorgen.nilsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "b2r2_hw_convert.h"
+#include "b2r2_internal.h"
+#include "b2r2_utils.h"
+
+/*
+ * Macros and constants
+ */
+
+/* VMX register values for RGB to YUV color conversion */
+/* Magic numbers from 27.11 in DB8500_DesignSpecification_v2.5.pdf */
+
+/* 601 Full range conversion matrix */
+#define B2R2_VMX0_RGB_TO_YUV_601_FULL_RANGE 0x107e4beb
+#define B2R2_VMX1_RGB_TO_YUV_601_FULL_RANGE 0x0982581d
+#define B2R2_VMX2_RGB_TO_YUV_601_FULL_RANGE 0xfa9ea483
+#define B2R2_VMX3_RGB_TO_YUV_601_FULL_RANGE 0x08000080
+
+/* 601 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_RGB_TO_YUV_601_STANDARD 0x0e1e8bee
+#define B2R2_VMX1_RGB_TO_YUV_601_STANDARD 0x08420419
+#define B2R2_VMX2_RGB_TO_YUV_601_STANDARD 0xfb5ed471
+#define B2R2_VMX3_RGB_TO_YUV_601_STANDARD 0x08004080
+
+/* 709 Full range conversion matrix */
+#define B2R2_VMX0_RGB_TO_YUV_709_FULL_RANGE 0x107e27f4
+#define B2R2_VMX1_RGB_TO_YUV_709_FULL_RANGE 0x06e2dc13
+#define B2R2_VMX2_RGB_TO_YUV_709_FULL_RANGE 0xfc5e6c83
+#define B2R2_VMX3_RGB_TO_YUV_709_FULL_RANGE 0x08000080
+
+/* 709 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_RGB_TO_YUV_709_STANDARD 0x0e3e6bf5
+#define B2R2_VMX1_RGB_TO_YUV_709_STANDARD 0x05e27410
+#define B2R2_VMX2_RGB_TO_YUV_709_STANDARD 0xfcdea471
+#define B2R2_VMX3_RGB_TO_YUV_709_STANDARD 0x08004080
+
+/* VMX register values for YUV to RGB color conversion */
+
+/* 601 Full range conversion matrix */
+#define B2R2_VMX0_YUV_TO_RGB_601_FULL_RANGE 0x2c440000
+#define B2R2_VMX1_YUV_TO_RGB_601_FULL_RANGE 0xe9a403aa
+#define B2R2_VMX2_YUV_TO_RGB_601_FULL_RANGE 0x0004013f
+#define B2R2_VMX3_YUV_TO_RGB_601_FULL_RANGE 0x34f21322
+
+/* 601 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_YUV_TO_RGB_601_STANDARD 0x3324a800
+#define B2R2_VMX1_YUV_TO_RGB_601_STANDARD 0xe604ab9c
+#define B2R2_VMX2_YUV_TO_RGB_601_STANDARD 0x0004a957
+#define B2R2_VMX3_YUV_TO_RGB_601_STANDARD 0x32121eeb
+
+/* 709 Full range conversion matrix */
+#define B2R2_VMX0_YUV_TO_RGB_709_FULL_RANGE 0x31440000
+#define B2R2_VMX1_YUV_TO_RGB_709_FULL_RANGE 0xf16403d1
+#define B2R2_VMX2_YUV_TO_RGB_709_FULL_RANGE 0x00040145
+#define B2R2_VMX3_YUV_TO_RGB_709_FULL_RANGE 0x33b14b18
+
+/* 709 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_YUV_TO_RGB_709_STANDARD 0x3964a800
+#define B2R2_VMX1_YUV_TO_RGB_709_STANDARD 0xef04abc9
+#define B2R2_VMX2_YUV_TO_RGB_709_STANDARD 0x0004a95f
+#define B2R2_VMX3_YUV_TO_RGB_709_STANDARD 0x307132df
+
+/* VMX register values for RGB to BGR conversion */
+#define B2R2_VMX0_RGB_TO_BGR 0x00000100
+#define B2R2_VMX1_RGB_TO_BGR 0x00040000
+#define B2R2_VMX2_RGB_TO_BGR 0x20000000
+#define B2R2_VMX3_RGB_TO_BGR 0x00000000
+
+/* VMX register values for BGR to YUV color conversion */
+/* Note: All BGR -> YUV values are calculated by multiplying
+ * the RGB -> YUV matrices [A], with [S] to form [A]x[S] where
+ * |0 0 1|
+ * S = |0 1 0|
+ * |1 0 0|
+ * Essentially swapping first and third columns in
+ * the matrices (VMX0, VMX1 and VMX2 values).
+ * The offset vector VMX3 remains untouched.
+ * Put another way, the value of bits 0 through 9
+ * is swapped with the value of
+ * bits 20 through 31 in VMX0, VMX1 and VMX2,
+ * taking into consideration the compression
+ * that is used on bits 0 through 9. Bit 0 being LSB.
+ */
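+/*
+ * In matrix terms (editorial note): each row (a b c) of an RGB -> YUV
+ * matrix simply becomes (c b a) in the corresponding BGR -> YUV matrix;
+ * the precomputed register values below encode exactly that swap.
+ */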
+
+/* 601 Full range conversion matrix */
+#define B2R2_VMX0_BGR_TO_YUV_601_FULL_RANGE 0xfd7e4883
+#define B2R2_VMX1_BGR_TO_YUV_601_FULL_RANGE 0x03a2584c
+#define B2R2_VMX2_BGR_TO_YUV_601_FULL_RANGE 0x107ea7d4
+#define B2R2_VMX3_BGR_TO_YUV_601_FULL_RANGE 0x08000080
+
+/* 601 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_BGR_TO_YUV_601_STANDARD 0xfdde8870
+#define B2R2_VMX1_BGR_TO_YUV_601_STANDARD 0x03220442
+#define B2R2_VMX2_BGR_TO_YUV_601_STANDARD 0x0e3ed7da
+#define B2R2_VMX3_BGR_TO_YUV_601_STANDARD 0x08004080
+
+/* 709 Full range conversion matrix */
+#define B2R2_VMX0_BGR_TO_YUV_709_FULL_RANGE 0xfe9e2483
+#define B2R2_VMX1_BGR_TO_YUV_709_FULL_RANGE 0x0262dc37
+#define B2R2_VMX2_BGR_TO_YUV_709_FULL_RANGE 0x107e6fe2
+#define B2R2_VMX3_BGR_TO_YUV_709_FULL_RANGE 0x08000080
+
+/* 709 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_BGR_TO_YUV_709_STANDARD 0xfebe6871
+#define B2R2_VMX1_BGR_TO_YUV_709_STANDARD 0x0202742f
+#define B2R2_VMX2_BGR_TO_YUV_709_STANDARD 0x0e3ea7e6
+#define B2R2_VMX3_BGR_TO_YUV_709_STANDARD 0x08004080
+
+
+/* VMX register values for YUV to BGR conversion */
+/* Note: All YUV -> BGR values are constructed
+ * from the YUV -> RGB ones, by swapping
+ * first and third rows in the matrix
+ * (VMX0 and VMX2 values). Further, the first and
+ * third values in the offset vector need to be
+ * swapped as well, i.e. bits 0 through 9 are swapped
+ * with bits 20 through 29 in the VMX3 value.
+ * Bit 0 being LSB.
+ */
+
+/* 601 Full range conversion matrix */
+#define B2R2_VMX0_YUV_TO_BGR_601_FULL_RANGE (B2R2_VMX2_YUV_TO_RGB_601_FULL_RANGE)
+#define B2R2_VMX1_YUV_TO_BGR_601_FULL_RANGE (B2R2_VMX1_YUV_TO_RGB_601_FULL_RANGE)
+#define B2R2_VMX2_YUV_TO_BGR_601_FULL_RANGE (B2R2_VMX0_YUV_TO_RGB_601_FULL_RANGE)
+#define B2R2_VMX3_YUV_TO_BGR_601_FULL_RANGE 0x3222134f
+
+/* 601 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_YUV_TO_BGR_601_STANDARD (B2R2_VMX2_YUV_TO_RGB_601_STANDARD)
+#define B2R2_VMX1_YUV_TO_BGR_601_STANDARD (B2R2_VMX1_YUV_TO_RGB_601_STANDARD)
+#define B2R2_VMX2_YUV_TO_BGR_601_STANDARD (B2R2_VMX0_YUV_TO_RGB_601_STANDARD)
+#define B2R2_VMX3_YUV_TO_BGR_601_STANDARD 0x2eb21f21
+
+/* 709 Full range conversion matrix */
+#define B2R2_VMX0_YUV_TO_BGR_709_FULL_RANGE (B2R2_VMX2_YUV_TO_RGB_709_FULL_RANGE)
+#define B2R2_VMX1_YUV_TO_BGR_709_FULL_RANGE (B2R2_VMX1_YUV_TO_RGB_709_FULL_RANGE)
+#define B2R2_VMX2_YUV_TO_BGR_709_FULL_RANGE (B2R2_VMX0_YUV_TO_RGB_709_FULL_RANGE)
+#define B2R2_VMX3_YUV_TO_BGR_709_FULL_RANGE 0x31814b3b
+
+/* 709 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_YUV_TO_BGR_709_STANDARD (B2R2_VMX2_YUV_TO_RGB_709_STANDARD)
+#define B2R2_VMX1_YUV_TO_BGR_709_STANDARD (B2R2_VMX1_YUV_TO_RGB_709_STANDARD)
+#define B2R2_VMX2_YUV_TO_BGR_709_STANDARD (B2R2_VMX0_YUV_TO_RGB_709_STANDARD)
+#define B2R2_VMX3_YUV_TO_BGR_709_STANDARD 0x2df13307
+
+
+/* VMX register values for YVU to RGB conversion */
+
+/* 601 Full range conversion matrix */
+#define B2R2_VMX0_YVU_TO_RGB_601_FULL_RANGE 0x00040120
+#define B2R2_VMX1_YVU_TO_RGB_601_FULL_RANGE 0xF544034D
+#define B2R2_VMX2_YVU_TO_RGB_601_FULL_RANGE 0x37840000
+#define B2R2_VMX3_YVU_TO_RGB_601_FULL_RANGE (B2R2_VMX3_YUV_TO_RGB_601_FULL_RANGE)
+
+/* 601 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_YVU_TO_RGB_601_STANDARD 0x0004A999
+#define B2R2_VMX1_YVU_TO_RGB_601_STANDARD 0xF384AB30
+#define B2R2_VMX2_YVU_TO_RGB_601_STANDARD 0x40A4A800
+#define B2R2_VMX3_YVU_TO_RGB_601_STANDARD (B2R2_VMX3_YUV_TO_RGB_601_STANDARD)
+
+/* VMX register values for RGB to YVU conversion */
+
+/* 601 Full range conversion matrix */
+#define B2R2_VMX0_RGB_TO_YVU_601_FULL_RANGE (B2R2_VMX2_RGB_TO_YUV_601_FULL_RANGE)
+#define B2R2_VMX1_RGB_TO_YVU_601_FULL_RANGE (B2R2_VMX1_RGB_TO_YUV_601_FULL_RANGE)
+#define B2R2_VMX2_RGB_TO_YVU_601_FULL_RANGE (B2R2_VMX0_RGB_TO_YUV_601_FULL_RANGE)
+#define B2R2_VMX3_RGB_TO_YVU_601_FULL_RANGE (B2R2_VMX3_RGB_TO_YUV_601_FULL_RANGE)
+
+/* 601 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_RGB_TO_YVU_601_STANDARD (B2R2_VMX2_RGB_TO_YUV_601_STANDARD)
+#define B2R2_VMX1_RGB_TO_YVU_601_STANDARD (B2R2_VMX1_RGB_TO_YUV_601_STANDARD)
+#define B2R2_VMX2_RGB_TO_YVU_601_STANDARD (B2R2_VMX0_RGB_TO_YUV_601_STANDARD)
+#define B2R2_VMX3_RGB_TO_YVU_601_STANDARD (B2R2_VMX3_RGB_TO_YUV_601_STANDARD)
+
+/* VMX register values for YVU to BGR conversion */
+
+/* 601 Full range conversion matrix */
+#define B2R2_VMX0_YVU_TO_BGR_601_FULL_RANGE (B2R2_VMX2_YVU_TO_RGB_601_FULL_RANGE)
+#define B2R2_VMX1_YVU_TO_BGR_601_FULL_RANGE (B2R2_VMX1_YVU_TO_RGB_601_FULL_RANGE)
+#define B2R2_VMX2_YVU_TO_BGR_601_FULL_RANGE (B2R2_VMX0_YVU_TO_RGB_601_FULL_RANGE)
+#define B2R2_VMX3_YVU_TO_BGR_601_FULL_RANGE 0x3222134F
+
+/* 601 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_YVU_TO_BGR_601_STANDARD (B2R2_VMX2_YVU_TO_RGB_601_STANDARD)
+#define B2R2_VMX1_YVU_TO_BGR_601_STANDARD (B2R2_VMX1_YVU_TO_RGB_601_STANDARD)
+#define B2R2_VMX2_YVU_TO_BGR_601_STANDARD (B2R2_VMX0_YVU_TO_RGB_601_STANDARD)
+#define B2R2_VMX3_YVU_TO_BGR_601_STANDARD 0x3222134F
+
+/* VMX register values for BGR to YVU conversion */
+
+/* 601 Full range conversion matrix */
+#define B2R2_VMX0_BGR_TO_YVU_601_FULL_RANGE (B2R2_VMX2_BGR_TO_YUV_601_FULL_RANGE)
+#define B2R2_VMX1_BGR_TO_YVU_601_FULL_RANGE (B2R2_VMX1_BGR_TO_YUV_601_FULL_RANGE)
+#define B2R2_VMX2_BGR_TO_YVU_601_FULL_RANGE (B2R2_VMX0_BGR_TO_YUV_601_FULL_RANGE)
+#define B2R2_VMX3_BGR_TO_YVU_601_FULL_RANGE (B2R2_VMX3_BGR_TO_YUV_601_FULL_RANGE)
+
+/* 601 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_BGR_TO_YVU_601_STANDARD (B2R2_VMX2_BGR_TO_YUV_601_STANDARD)
+#define B2R2_VMX1_BGR_TO_YVU_601_STANDARD (B2R2_VMX1_BGR_TO_YUV_601_STANDARD)
+#define B2R2_VMX2_BGR_TO_YVU_601_STANDARD (B2R2_VMX0_BGR_TO_YUV_601_STANDARD)
+#define B2R2_VMX3_BGR_TO_YVU_601_STANDARD (B2R2_VMX3_BGR_TO_YUV_601_STANDARD)
+
+/* VMX register values for YVU to YUV conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+/* Internally, the components are in fact stored
+ * with luma in the middle, i.e. UYV, which is why
+ * the values are just like for RGB->BGR conversion.
+ */
+#define B2R2_VMX0_YVU_TO_YUV 0x00000100
+#define B2R2_VMX1_YVU_TO_YUV 0x00040000
+#define B2R2_VMX2_YVU_TO_YUV 0x20000000
+#define B2R2_VMX3_YVU_TO_YUV 0x00000000
+
+/* VMX register values for RGB to BLT_YUV888 conversion */
+
+/*
+ * BLT_YUV888 has color components laid out in memory as V, U, Y, (Alpha)
+ * with V at the first byte (due to little endian addressing).
+ * B2R2 expects them ordered as U, Y, V, (A)
+ * with U at the first byte.
+ * Note: RGB -> BLT_YUV888 values are calculated by multiplying
+ * the RGB -> YUV matrix [A], with [S] to form [S]x[A] where
+ * |0 1 0|
+ * S = |0 0 1|
+ * |1 0 0|
+ * Essentially changing the order of rows in the original
+ * matrix [A].
+ * row1 -> row3
+ * row2 -> row1
+ * row3 -> row2
+ * Values in the offset vector are swapped in the same manner.
+ */
+/* 601 Full range conversion matrix */
+#define B2R2_VMX0_RGB_TO_BLT_YUV888_601_FULL_RANGE (B2R2_VMX1_RGB_TO_YUV_601_FULL_RANGE)
+#define B2R2_VMX1_RGB_TO_BLT_YUV888_601_FULL_RANGE (B2R2_VMX2_RGB_TO_YUV_601_FULL_RANGE)
+#define B2R2_VMX2_RGB_TO_BLT_YUV888_601_FULL_RANGE (B2R2_VMX0_RGB_TO_YUV_601_FULL_RANGE)
+#define B2R2_VMX3_RGB_TO_BLT_YUV888_601_FULL_RANGE 0x00020080
+
+/* 601 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_RGB_TO_BLT_YUV888_601_STANDARD (B2R2_VMX1_RGB_TO_YUV_601_STANDARD)
+#define B2R2_VMX1_RGB_TO_BLT_YUV888_601_STANDARD (B2R2_VMX2_RGB_TO_YUV_601_STANDARD)
+#define B2R2_VMX2_RGB_TO_BLT_YUV888_601_STANDARD (B2R2_VMX0_RGB_TO_YUV_601_STANDARD)
+#define B2R2_VMX3_RGB_TO_BLT_YUV888_601_STANDARD 0x00020080
+
+/* VMX register values for BLT_YUV888 to RGB conversion */
+
+/*
+ * Note: BLT_YUV888 -> RGB values are calculated by multiplying
+ * the YUV -> RGB matrix [A], with [S] to form [A]x[S] where
+ * |0 0 1|
+ * S = |1 0 0|
+ * |0 1 0|
+ * Essentially changing the order of columns in the original
+ * matrix [A].
+ * col1 -> col3
+ * col2 -> col1
+ * col3 -> col2
+ * Values in the offset vector remain unchanged.
+ */
+/* 601 Full range conversion matrix */
+#define B2R2_VMX0_BLT_YUV888_TO_RGB_601_FULL_RANGE 0x20000121
+#define B2R2_VMX1_BLT_YUV888_TO_RGB_601_FULL_RANGE 0x201ea74c
+#define B2R2_VMX2_BLT_YUV888_TO_RGB_601_FULL_RANGE 0x2006f000
+#define B2R2_VMX3_BLT_YUV888_TO_RGB_601_FULL_RANGE (B2R2_VMX3_YUV_TO_RGB_601_FULL_RANGE)
+
+/* 601 Standard (clamped) conversion matrix */
+#define B2R2_VMX0_BLT_YUV888_TO_RGB_601_STANDARD 0x25400133
+#define B2R2_VMX1_BLT_YUV888_TO_RGB_601_STANDARD 0x255E7330
+#define B2R2_VMX2_BLT_YUV888_TO_RGB_601_STANDARD 0x25481400
+#define B2R2_VMX3_BLT_YUV888_TO_RGB_601_STANDARD (B2R2_VMX3_YUV_TO_RGB_601_FULL_RANGE)
+
+/* VMX register values for YUV to BLT_YUV888 conversion */
+#define B2R2_VMX0_YUV_TO_BLT_YUV888 0x00040000
+#define B2R2_VMX1_YUV_TO_BLT_YUV888 0x00000100
+#define B2R2_VMX2_YUV_TO_BLT_YUV888 0x20000000
+#define B2R2_VMX3_YUV_TO_BLT_YUV888 0x00000000
+
+/* VMX register values for BLT_YUV888 to YUV conversion */
+#define B2R2_VMX0_BLT_YUV888_TO_YUV 0x00000100
+#define B2R2_VMX1_BLT_YUV888_TO_YUV 0x20000000
+#define B2R2_VMX2_BLT_YUV888_TO_YUV 0x00040000
+#define B2R2_VMX3_BLT_YUV888_TO_YUV 0x00000000
+
+/* VMX register values for YVU to BLT_YUV888 conversion */
+#define B2R2_VMX0_YVU_TO_BLT_YUV888 0x00040000
+#define B2R2_VMX1_YVU_TO_BLT_YUV888 0x20000000
+#define B2R2_VMX2_YVU_TO_BLT_YUV888 0x00000100
+#define B2R2_VMX3_YVU_TO_BLT_YUV888 0x00000000
+
+/* VMX register values for BLT_YUV888 to YVU conversion */
+#define B2R2_VMX0_BLT_YUV888_TO_YVU 0x00040000
+#define B2R2_VMX1_BLT_YUV888_TO_YVU 0x20000000
+#define B2R2_VMX2_BLT_YUV888_TO_YVU 0x00000100
+#define B2R2_VMX3_BLT_YUV888_TO_YVU 0x00000000
+
+/*
+ * Internal types
+ */
+
+/*
+ * Global variables
+ */
+
+/**
+ * VMx values for color space conversion
+ * (component swap)
+ */
+static const u32 vmx_yuv_to_blt_yuv888[] = {
+ B2R2_VMX0_YUV_TO_BLT_YUV888,
+ B2R2_VMX1_YUV_TO_BLT_YUV888,
+ B2R2_VMX2_YUV_TO_BLT_YUV888,
+ B2R2_VMX3_YUV_TO_BLT_YUV888,
+};
+
+static const u32 vmx_blt_yuv888_to_yuv[] = {
+ B2R2_VMX0_BLT_YUV888_TO_YUV,
+ B2R2_VMX1_BLT_YUV888_TO_YUV,
+ B2R2_VMX2_BLT_YUV888_TO_YUV,
+ B2R2_VMX3_BLT_YUV888_TO_YUV,
+};
+
+static const u32 vmx_yvu_to_blt_yuv888[] = {
+ B2R2_VMX0_YVU_TO_BLT_YUV888,
+ B2R2_VMX1_YVU_TO_BLT_YUV888,
+ B2R2_VMX2_YVU_TO_BLT_YUV888,
+ B2R2_VMX3_YVU_TO_BLT_YUV888,
+};
+
+static const u32 vmx_blt_yuv888_to_yvu[] = {
+ B2R2_VMX0_BLT_YUV888_TO_YVU,
+ B2R2_VMX1_BLT_YUV888_TO_YVU,
+ B2R2_VMX2_BLT_YUV888_TO_YVU,
+ B2R2_VMX3_BLT_YUV888_TO_YVU,
+};
+
+static const u32 vmx_rgb_to_bgr[] = {
+ B2R2_VMX0_RGB_TO_BGR,
+ B2R2_VMX1_RGB_TO_BGR,
+ B2R2_VMX2_RGB_TO_BGR,
+ B2R2_VMX3_RGB_TO_BGR,
+};
+
+static const u32 vmx_yvu_to_yuv[] = {
+ B2R2_VMX0_YVU_TO_YUV,
+ B2R2_VMX1_YVU_TO_YUV,
+ B2R2_VMX2_YVU_TO_YUV,
+ B2R2_VMX3_YVU_TO_YUV,
+};
+
+/**
+ * VMx values for color space conversions
+ * (standard 601 conversions)
+ */
+static const u32 vmx_rgb_to_yuv[] = {
+ B2R2_VMX0_RGB_TO_YUV_601_STANDARD,
+ B2R2_VMX1_RGB_TO_YUV_601_STANDARD,
+ B2R2_VMX2_RGB_TO_YUV_601_STANDARD,
+ B2R2_VMX3_RGB_TO_YUV_601_STANDARD,
+};
+
+static const u32 vmx_yuv_to_rgb[] = {
+ B2R2_VMX0_YUV_TO_RGB_601_STANDARD,
+ B2R2_VMX1_YUV_TO_RGB_601_STANDARD,
+ B2R2_VMX2_YUV_TO_RGB_601_STANDARD,
+ B2R2_VMX3_YUV_TO_RGB_601_STANDARD,
+};
+
+static const u32 vmx_rgb_to_blt_yuv888[] = {
+ B2R2_VMX0_RGB_TO_BLT_YUV888_601_STANDARD,
+ B2R2_VMX1_RGB_TO_BLT_YUV888_601_STANDARD,
+ B2R2_VMX2_RGB_TO_BLT_YUV888_601_STANDARD,
+ B2R2_VMX3_RGB_TO_BLT_YUV888_601_STANDARD,
+};
+
+static const u32 vmx_blt_yuv888_to_rgb[] = {
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_STANDARD,
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_STANDARD,
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_STANDARD,
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_STANDARD,
+};
+
+static const u32 vmx_rgb_to_yvu[] = {
+ B2R2_VMX0_RGB_TO_YVU_601_STANDARD,
+ B2R2_VMX1_RGB_TO_YVU_601_STANDARD,
+ B2R2_VMX2_RGB_TO_YVU_601_STANDARD,
+ B2R2_VMX3_RGB_TO_YVU_601_STANDARD,
+};
+
+static const u32 vmx_yvu_to_rgb[] = {
+ B2R2_VMX0_YVU_TO_RGB_601_STANDARD,
+ B2R2_VMX1_YVU_TO_RGB_601_STANDARD,
+ B2R2_VMX2_YVU_TO_RGB_601_STANDARD,
+ B2R2_VMX3_YVU_TO_RGB_601_STANDARD,
+};
+
+static const u32 vmx_bgr_to_yuv[] = {
+ B2R2_VMX0_BGR_TO_YUV_601_STANDARD,
+ B2R2_VMX1_BGR_TO_YUV_601_STANDARD,
+ B2R2_VMX2_BGR_TO_YUV_601_STANDARD,
+ B2R2_VMX3_BGR_TO_YUV_601_STANDARD,
+};
+
+static const u32 vmx_yuv_to_bgr[] = {
+ B2R2_VMX0_YUV_TO_BGR_601_STANDARD,
+ B2R2_VMX1_YUV_TO_BGR_601_STANDARD,
+ B2R2_VMX2_YUV_TO_BGR_601_STANDARD,
+ B2R2_VMX3_YUV_TO_BGR_601_STANDARD,
+};
+
+static const u32 vmx_bgr_to_yvu[] = {
+ B2R2_VMX0_BGR_TO_YVU_601_STANDARD,
+ B2R2_VMX1_BGR_TO_YVU_601_STANDARD,
+ B2R2_VMX2_BGR_TO_YVU_601_STANDARD,
+ B2R2_VMX3_BGR_TO_YVU_601_STANDARD,
+};
+
+static const u32 vmx_yvu_to_bgr[] = {
+ B2R2_VMX0_YVU_TO_BGR_601_STANDARD,
+ B2R2_VMX1_YVU_TO_BGR_601_STANDARD,
+ B2R2_VMX2_YVU_TO_BGR_601_STANDARD,
+ B2R2_VMX3_YVU_TO_BGR_601_STANDARD,
+};
+
+/**
+ * VMx values for color space conversions
+ * (full range conversions)
+ */
+
+static const u32 vmx_full_rgb_to_yuv[] = {
+ B2R2_VMX0_RGB_TO_YUV_601_FULL_RANGE,
+ B2R2_VMX1_RGB_TO_YUV_601_FULL_RANGE,
+ B2R2_VMX2_RGB_TO_YUV_601_FULL_RANGE,
+ B2R2_VMX3_RGB_TO_YUV_601_FULL_RANGE,
+};
+
+static const u32 vmx_full_yuv_to_rgb[] = {
+ B2R2_VMX0_YUV_TO_RGB_601_FULL_RANGE,
+ B2R2_VMX1_YUV_TO_RGB_601_FULL_RANGE,
+ B2R2_VMX2_YUV_TO_RGB_601_FULL_RANGE,
+ B2R2_VMX3_YUV_TO_RGB_601_FULL_RANGE,
+};
+
+static const u32 vmx_full_rgb_to_blt_yuv888[] = {
+ B2R2_VMX0_RGB_TO_BLT_YUV888_601_FULL_RANGE,
+ B2R2_VMX1_RGB_TO_BLT_YUV888_601_FULL_RANGE,
+ B2R2_VMX2_RGB_TO_BLT_YUV888_601_FULL_RANGE,
+ B2R2_VMX3_RGB_TO_BLT_YUV888_601_FULL_RANGE,
+};
+
+static const u32 vmx_full_blt_yuv888_to_rgb[] = {
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_FULL_RANGE,
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_FULL_RANGE,
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_FULL_RANGE,
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_FULL_RANGE,
+};
+
+static const u32 vmx_full_yvu_to_rgb[] = {
+ B2R2_VMX0_YVU_TO_RGB_601_FULL_RANGE,
+ B2R2_VMX1_YVU_TO_RGB_601_FULL_RANGE,
+ B2R2_VMX2_YVU_TO_RGB_601_FULL_RANGE,
+ B2R2_VMX3_YVU_TO_RGB_601_FULL_RANGE,
+};
+
+static const u32 vmx_full_rgb_to_yvu[] = {
+ B2R2_VMX0_RGB_TO_YVU_601_FULL_RANGE,
+ B2R2_VMX1_RGB_TO_YVU_601_FULL_RANGE,
+ B2R2_VMX2_RGB_TO_YVU_601_FULL_RANGE,
+ B2R2_VMX3_RGB_TO_YVU_601_FULL_RANGE,
+};
+
+static const u32 vmx_full_bgr_to_yuv[] = {
+ B2R2_VMX0_BGR_TO_YUV_601_FULL_RANGE,
+ B2R2_VMX1_BGR_TO_YUV_601_FULL_RANGE,
+ B2R2_VMX2_BGR_TO_YUV_601_FULL_RANGE,
+ B2R2_VMX3_BGR_TO_YUV_601_FULL_RANGE,
+};
+
+static const u32 vmx_full_yuv_to_bgr[] = {
+ B2R2_VMX0_YUV_TO_BGR_601_FULL_RANGE,
+ B2R2_VMX1_YUV_TO_BGR_601_FULL_RANGE,
+ B2R2_VMX2_YUV_TO_BGR_601_FULL_RANGE,
+ B2R2_VMX3_YUV_TO_BGR_601_FULL_RANGE,
+};
+
+static const u32 vmx_full_bgr_to_yvu[] = {
+ B2R2_VMX0_BGR_TO_YVU_601_FULL_RANGE,
+ B2R2_VMX1_BGR_TO_YVU_601_FULL_RANGE,
+ B2R2_VMX2_BGR_TO_YVU_601_FULL_RANGE,
+ B2R2_VMX3_BGR_TO_YVU_601_FULL_RANGE,
+};
+
+static const u32 vmx_full_yvu_to_bgr[] = {
+ B2R2_VMX0_YVU_TO_BGR_601_FULL_RANGE,
+ B2R2_VMX1_YVU_TO_BGR_601_FULL_RANGE,
+ B2R2_VMX2_YVU_TO_BGR_601_FULL_RANGE,
+ B2R2_VMX3_YVU_TO_BGR_601_FULL_RANGE,
+};
+
+/*
+ * Forward declaration of private functions
+ */
+
+/*
+ * Public functions
+ */
+
+/**
+ * Setup input versatile matrix for color space conversion
+ */
+int b2r2_setup_ivmx(struct b2r2_node *node, enum b2r2_color_conversion cc)
+{
+ const u32 *vmx = NULL;
+
+ if (b2r2_get_vmx(cc, &vmx) < 0 || vmx == NULL)
+ return -1;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+
+ node->node.GROUP15.B2R2_VMX0 = vmx[0];
+ node->node.GROUP15.B2R2_VMX1 = vmx[1];
+ node->node.GROUP15.B2R2_VMX2 = vmx[2];
+ node->node.GROUP15.B2R2_VMX3 = vmx[3];
+
+ return 0;
+}
+
+/**
+ * Setup output versatile matrix for color space conversion
+ */
+int b2r2_setup_ovmx(struct b2r2_node *node, enum b2r2_color_conversion cc)
+{
+ const u32 *vmx = NULL;
+
+ if (b2r2_get_vmx(cc, &vmx) < 0 || vmx == NULL)
+ return -1;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_OVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_OVMX;
+
+ node->node.GROUP16.B2R2_VMX0 = vmx[0];
+ node->node.GROUP16.B2R2_VMX1 = vmx[1];
+ node->node.GROUP16.B2R2_VMX2 = vmx[2];
+ node->node.GROUP16.B2R2_VMX3 = vmx[3];
+
+ return 0;
+}
+
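+/*
+ * Illustrative usage (editorial, not part of the original file),
+ * assuming a node and a src/dst format pair:
+ *
+ *	enum b2r2_color_conversion cc =
+ *		b2r2_get_color_conversion(src_fmt, dst_fmt, fullrange);
+ *	if (cc != B2R2_CC_NOTHING)
+ *		(void)b2r2_setup_ivmx(node, cc);
+ */
+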
+enum b2r2_color_conversion b2r2_get_color_conversion(enum b2r2_blt_fmt src_fmt,
+ enum b2r2_blt_fmt dst_fmt, bool fullrange)
+{
+ if (b2r2_is_rgb_fmt(src_fmt)) {
+ if (b2r2_is_yvu_fmt(dst_fmt))
+ return fullrange ? B2R2_CC_RGB_TO_YVU_FULL :
+ B2R2_CC_RGB_TO_YVU;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ /*
+ * (A)YUV/VUY(A) formats differ only in component
+ * order. This is handled by the endianness bit
+ * in B2R2_STY/TTY registers when src/target are set.
+ */
+ return fullrange ? B2R2_CC_RGB_TO_BLT_YUV888_FULL :
+ B2R2_CC_RGB_TO_BLT_YUV888;
+ else if (b2r2_is_yuv_fmt(dst_fmt))
+ return fullrange ? B2R2_CC_RGB_TO_YUV_FULL :
+ B2R2_CC_RGB_TO_YUV;
+ else if (b2r2_is_bgr_fmt(dst_fmt))
+ return B2R2_CC_RGB_TO_BGR;
+ } else if (b2r2_is_yvu_fmt(src_fmt)) {
+ if (b2r2_is_rgb_fmt(dst_fmt))
+ return fullrange ? B2R2_CC_YVU_FULL_TO_RGB :
+ B2R2_CC_YVU_TO_RGB;
+ else if (b2r2_is_bgr_fmt(dst_fmt))
+ return fullrange ? B2R2_CC_YVU_FULL_TO_BGR :
+ B2R2_CC_YVU_TO_BGR;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return B2R2_CC_YVU_TO_BLT_YUV888;
+ else if (b2r2_is_yuv_fmt(dst_fmt) &&
+ !b2r2_is_yvu_fmt(dst_fmt))
+ return B2R2_CC_YVU_TO_YUV;
+ } else if (src_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ src_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ src_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ src_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) {
+ /*
+ * (A)YUV/VUY(A) formats differ only in component
+ * order. This is handled by the endianness bit
+ * in B2R2_STY/TTY registers when src/target are set.
+ */
+ if (b2r2_is_rgb_fmt(dst_fmt))
+ return fullrange ? B2R2_CC_BLT_YUV888_FULL_TO_RGB :
+ B2R2_CC_BLT_YUV888_TO_RGB;
+ else if (b2r2_is_yvu_fmt(dst_fmt))
+ return B2R2_CC_BLT_YUV888_TO_YVU;
+ else if (b2r2_is_yuv_fmt(dst_fmt)) {
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return B2R2_CC_NOTHING;
+ default:
+ return B2R2_CC_BLT_YUV888_TO_YUV;
+ }
+ }
+ } else if (b2r2_is_yuv_fmt(src_fmt)) {
+ if (b2r2_is_rgb_fmt(dst_fmt))
+ return fullrange ? B2R2_CC_YUV_FULL_TO_RGB :
+ B2R2_CC_YUV_TO_RGB;
+ else if (b2r2_is_bgr_fmt(dst_fmt))
+ return fullrange ? B2R2_CC_YUV_FULL_TO_BGR :
+ B2R2_CC_YUV_TO_BGR;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return B2R2_CC_YUV_TO_BLT_YUV888;
+ else if (b2r2_is_yvu_fmt(dst_fmt))
+ return B2R2_CC_YVU_TO_YUV;
+ } else if (b2r2_is_bgr_fmt(src_fmt)) {
+ if (b2r2_is_rgb_fmt(dst_fmt))
+ return B2R2_CC_RGB_TO_BGR;
+ else if (b2r2_is_yvu_fmt(dst_fmt))
+ return fullrange ? B2R2_CC_BGR_TO_YVU_FULL :
+ B2R2_CC_BGR_TO_YVU;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ BUG_ON(1);
+ else if (b2r2_is_yuv_fmt(dst_fmt))
+ return fullrange ? B2R2_CC_BGR_TO_YUV_FULL :
+ B2R2_CC_BGR_TO_YUV;
+ }
+
+ return B2R2_CC_NOTHING;
+}
+
+int b2r2_get_vmx(enum b2r2_color_conversion cc, const u32 **vmx)
+{
+ if (vmx == NULL)
+ return -1;
+
+ switch (cc) {
+ case B2R2_CC_RGB_TO_BGR:
+ *vmx = &vmx_rgb_to_bgr[0];
+ break;
+ case B2R2_CC_BLT_YUV888_TO_YVU:
+ *vmx = &vmx_blt_yuv888_to_yvu[0];
+ break;
+ case B2R2_CC_BLT_YUV888_TO_YUV:
+ *vmx = &vmx_blt_yuv888_to_yuv[0];
+ break;
+ case B2R2_CC_YVU_TO_YUV:
+ *vmx = &vmx_yvu_to_yuv[0];
+ break;
+ case B2R2_CC_YVU_TO_BLT_YUV888:
+ *vmx = &vmx_yvu_to_blt_yuv888[0];
+ break;
+ case B2R2_CC_YUV_TO_BLT_YUV888:
+ *vmx = &vmx_yuv_to_blt_yuv888[0];
+ break;
+ case B2R2_CC_RGB_TO_YUV:
+ *vmx = &vmx_rgb_to_yuv[0];
+ break;
+ case B2R2_CC_RGB_TO_YUV_FULL:
+ *vmx = &vmx_full_rgb_to_yuv[0];
+ break;
+ case B2R2_CC_RGB_TO_YVU:
+ *vmx = &vmx_rgb_to_yvu[0];
+ break;
+ case B2R2_CC_RGB_TO_YVU_FULL:
+ *vmx = &vmx_full_rgb_to_yvu[0];
+ break;
+ case B2R2_CC_RGB_TO_BLT_YUV888:
+ *vmx = &vmx_rgb_to_blt_yuv888[0];
+ break;
+ case B2R2_CC_RGB_TO_BLT_YUV888_FULL:
+ *vmx = &vmx_full_rgb_to_blt_yuv888[0];
+ break;
+ case B2R2_CC_BGR_TO_YVU:
+ *vmx = &vmx_bgr_to_yvu[0];
+ break;
+ case B2R2_CC_BGR_TO_YVU_FULL:
+ *vmx = &vmx_full_bgr_to_yvu[0];
+ break;
+ case B2R2_CC_BGR_TO_YUV:
+ *vmx = &vmx_bgr_to_yuv[0];
+ break;
+ case B2R2_CC_BGR_TO_YUV_FULL:
+ *vmx = &vmx_full_bgr_to_yuv[0];
+ break;
+ case B2R2_CC_YUV_TO_RGB:
+ *vmx = &vmx_yuv_to_rgb[0];
+ break;
+ case B2R2_CC_YUV_FULL_TO_RGB:
+ *vmx = &vmx_full_yuv_to_rgb[0];
+ break;
+ case B2R2_CC_YUV_TO_BGR:
+ *vmx = &vmx_yuv_to_bgr[0];
+ break;
+ case B2R2_CC_YUV_FULL_TO_BGR:
+ *vmx = &vmx_full_yuv_to_bgr[0];
+ break;
+ case B2R2_CC_YVU_TO_RGB:
+ *vmx = &vmx_yvu_to_rgb[0];
+ break;
+ case B2R2_CC_YVU_FULL_TO_RGB:
+ *vmx = &vmx_full_yvu_to_rgb[0];
+ break;
+ case B2R2_CC_YVU_TO_BGR:
+ *vmx = &vmx_yvu_to_bgr[0];
+ break;
+ case B2R2_CC_YVU_FULL_TO_BGR:
+ *vmx = &vmx_full_yvu_to_bgr[0];
+ break;
+ case B2R2_CC_BLT_YUV888_TO_RGB:
+ *vmx = &vmx_blt_yuv888_to_rgb[0];
+ break;
+ case B2R2_CC_BLT_YUV888_FULL_TO_RGB:
+ *vmx = &vmx_full_blt_yuv888_to_rgb[0];
+ break;
+ case B2R2_CC_NOTHING:
+ default:
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/video/b2r2/b2r2_hw_convert.h b/drivers/video/b2r2/b2r2_hw_convert.h
new file mode 100644
index 00000000000..e173e898e44
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_hw_convert.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * ST-Ericsson B2R2 hw color conversion definitions
+ *
+ * Author: Jorgen Nilsson <jorgen.nilsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef B2R2_HW_CONVERT_H__
+#define B2R2_HW_CONVERT_H__
+
+#include "b2r2_internal.h"
+
+enum b2r2_color_conversion {
+ B2R2_CC_NOTHING = 0,
+ B2R2_CC_RGB_TO_BGR,
+ B2R2_CC_BLT_YUV888_TO_YVU,
+ B2R2_CC_BLT_YUV888_TO_YUV,
+ B2R2_CC_YUV_TO_BLT_YUV888,
+ B2R2_CC_YVU_TO_YUV,
+ B2R2_CC_YVU_TO_BLT_YUV888,
+ B2R2_CC_RGB_TO_YUV,
+ B2R2_CC_RGB_TO_YUV_FULL,
+ B2R2_CC_RGB_TO_YVU,
+ B2R2_CC_RGB_TO_YVU_FULL,
+ B2R2_CC_RGB_TO_BLT_YUV888,
+ B2R2_CC_RGB_TO_BLT_YUV888_FULL,
+ B2R2_CC_BGR_TO_YVU,
+ B2R2_CC_BGR_TO_YVU_FULL,
+ B2R2_CC_BGR_TO_YUV,
+ B2R2_CC_BGR_TO_YUV_FULL,
+ B2R2_CC_YUV_TO_RGB,
+ B2R2_CC_YUV_FULL_TO_RGB,
+ B2R2_CC_YUV_TO_BGR,
+ B2R2_CC_YUV_FULL_TO_BGR,
+ B2R2_CC_YVU_TO_RGB,
+ B2R2_CC_YVU_FULL_TO_RGB,
+ B2R2_CC_YVU_TO_BGR,
+ B2R2_CC_YVU_FULL_TO_BGR,
+ B2R2_CC_BLT_YUV888_TO_RGB,
+ B2R2_CC_BLT_YUV888_FULL_TO_RGB,
+};
+
+int b2r2_setup_ivmx(struct b2r2_node *node, enum b2r2_color_conversion cc);
+int b2r2_setup_ovmx(struct b2r2_node *node, enum b2r2_color_conversion cc);
+enum b2r2_color_conversion b2r2_get_color_conversion(enum b2r2_blt_fmt src_fmt,
+ enum b2r2_blt_fmt dst_fmt, bool fullrange);
+int b2r2_get_vmx(enum b2r2_color_conversion cc, const u32 **vmx);
+
+#endif /* B2R2_HW_CONVERT_H__ */
diff --git a/drivers/video/b2r2/b2r2_input_validation.c b/drivers/video/b2r2/b2r2_input_validation.c
new file mode 100644
index 00000000000..c8eb2f7b025
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_input_validation.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include "b2r2_internal.h"
+#include "b2r2_input_validation.h"
+#include "b2r2_debug.h"
+#include "b2r2_utils.h"
+
+#include <video/b2r2_blt.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+
+static bool is_valid_format(enum b2r2_blt_fmt fmt);
+static bool is_valid_bg_format(enum b2r2_blt_fmt fmt);
+
+static bool is_valid_pitch_for_fmt(struct device *dev,
+ u32 pitch, s32 width, enum b2r2_blt_fmt fmt);
+
+static bool is_aligned_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt);
+static s32 width_2_complete_width(s32 width, enum b2r2_blt_fmt fmt);
+static bool is_complete_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt);
+static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt);
+
+static bool validate_img(struct device *dev,
+ struct b2r2_blt_img *img);
+static bool validate_rect(struct device *dev,
+ struct b2r2_blt_rect *rect);
+
+
+static bool is_valid_format(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool is_valid_bg_format(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return false;
+ default:
+ return true;
+ }
+}
+
+
+static bool is_valid_pitch_for_fmt(struct device *dev,
+ u32 pitch, s32 width, enum b2r2_blt_fmt fmt)
+{
+ s32 complete_width;
+ u32 pitch_derived_from_width;
+
+ complete_width = width_2_complete_width(width, fmt);
+
+ pitch_derived_from_width = b2r2_calc_pitch_from_width(dev,
+ complete_width, fmt);
+
+ if (pitch < pitch_derived_from_width)
+ return false;
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ if (!b2r2_is_aligned(pitch, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (!b2r2_is_aligned(pitch, 4))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
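+/*
+ * Example (editorial): a 100 pixel wide B2R2_BLT_FMT_16_BIT_RGB565
+ * image needs a pitch of at least 200 bytes, and the pitch must also
+ * be 2-byte aligned, so e.g. 201 would be rejected.
+ */
+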
+
+static bool is_aligned_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ if (!b2r2_is_aligned(width, 4))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_1_BIT_A1:
+ if (!b2r2_is_aligned(width, 8))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ if (!b2r2_is_aligned(width, 2))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static s32 width_2_complete_width(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return b2r2_align_up(width, 2);
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return b2r2_align_up(width, 16);
+
+ default:
+ return width;
+ }
+}
+
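+/* Checks that a width already is a complete width for the format. */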
+static bool is_complete_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ if (!b2r2_is_aligned(width, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if (!b2r2_is_aligned(width, 16))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ if (!b2r2_is_aligned(height, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if (!b2r2_is_aligned(height, 16))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
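+/*
+ * Validates a single image: format, dimensions, width/pitch alignment
+ * and that an offset-based buffer stays within the s32 range.
+ */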
+static bool validate_img(struct device *dev,
+ struct b2r2_blt_img *img)
+{
+ /*
+ * So that we can always do width * height * bpp without overflowing a
+ * 32 bit signed integer. isqrt(s32_max / max_bpp) was used to
+ * calculate the value; with max_bpp = 32 bits per pixel this gives
+ * isqrt(s32_max / 32) = 8191.
+ */
+ static const s32 max_img_width_height = 8191;
+
+ s32 img_size;
+
+ if (!is_valid_format(img->fmt)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!is_valid_format(img->fmt)\n");
+ return false;
+ }
+
+ if (img->width < 0 || img->width > max_img_width_height ||
+ img->height < 0 || img->height > max_img_width_height) {
+ b2r2_log_info(dev, "Validation Error: "
+ "img->width < 0 || "
+ "img->width > max_img_width_height || "
+ "img->height < 0 || "
+ "img->height > max_img_width_height\n");
+ return false;
+ }
+
+ if (b2r2_is_mb_fmt(img->fmt)) {
+ if (!is_complete_width_for_fmt(img->width, img->fmt)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!is_complete_width_for_fmt(img->width,"
+ " img->fmt)\n");
+ return false;
+ }
+ } else {
+ if (0 == img->pitch &&
+ (!is_aligned_width_for_fmt(img->width, img->fmt) ||
+ !is_complete_width_for_fmt(img->width, img->fmt))) {
+ b2r2_log_info(dev,
+ "Validation Error: "
+ "0 == img->pitch && "
+ "(!is_aligned_width_for_fmt(img->width,"
+ " img->fmt) || "
+ "!is_complete_width_for_fmt(img->width,"
+ " img->fmt))\n");
+ return false;
+ }
+
+ if (img->pitch != 0 &&
+ !is_valid_pitch_for_fmt(dev, img->pitch, img->width,
+ img->fmt)) {
+ b2r2_log_info(dev,
+ "Validation Error: "
+ "img->pitch != 0 && "
+ "!is_valid_pitch_for_fmt(dev, "
+ "img->pitch, img->width, img->fmt)\n");
+ return false;
+ }
+ }
+
+ if (!is_valid_height_for_fmt(img->height, img->fmt)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!is_valid_height_for_fmt(img->height, img->fmt)\n");
+ return false;
+ }
+
+ img_size = b2r2_get_img_size(dev, img);
+
+ /*
+ * To keep the entire image inside s32 range.
+ */
+ if ((B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == img->buf.type ||
+ B2R2_BLT_PTR_FD_OFFSET == img->buf.type) &&
+ img->buf.offset > (u32)b2r2_s32_max - (u32)img_size) {
+ b2r2_log_info(dev, "Validation Error: "
+ "(B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == "
+ "img->buf.type || B2R2_BLT_PTR_FD_OFFSET == "
+ "img->buf.type) && img->buf.offset > "
+ "(u32)B2R2_MAX_S32 - (u32)img_size\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool validate_rect(struct device *dev,
+ struct b2r2_blt_rect *rect)
+{
+ if (rect->width < 0 || rect->height < 0) {
+ b2r2_log_info(dev, "Validation Error: "
+ "rect->width < 0 || rect->height < 0\n");
+ return false;
+ }
+
+ return true;
+}
+
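+/**
+ * b2r2_validate_user_req() - Validates a blit request received from
+ * userspace before it is used any further. Returns true if the request
+ * is valid.
+ */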
+bool b2r2_validate_user_req(struct device *dev,
+ struct b2r2_blt_req *req)
+{
+ bool is_src_img_used;
+ bool is_bg_img_used;
+ bool is_src_mask_used;
+ bool is_dst_clip_rect_used;
+
+ if (req->size != sizeof(struct b2r2_blt_req)) {
+ b2r2_log_err(dev, "Validation Error: "
+ "req->size != sizeof(struct b2r2_blt_req)\n");
+ return false;
+ }
+
+ is_src_img_used = !(req->flags & B2R2_BLT_FLAG_SOURCE_FILL ||
+ req->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW);
+ is_bg_img_used = (req->flags & B2R2_BLT_FLAG_BG_BLEND);
+ is_src_mask_used = req->flags & B2R2_BLT_FLAG_SOURCE_MASK;
+ is_dst_clip_rect_used = req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP;
+
+ if (is_src_img_used || is_src_mask_used) {
+ if (!validate_rect(dev, &req->src_rect)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!validate_rect(dev, &req->src_rect)\n");
+ return false;
+ }
+ }
+
+ if (!validate_rect(dev, &req->dst_rect)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!validate_rect(dev, &req->dst_rect)\n");
+ return false;
+ }
+
+ if (is_bg_img_used) {
+ if (!validate_rect(dev, &req->bg_rect)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!validate_rect(dev, &req->bg_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_dst_clip_rect_used) {
+ if (!validate_rect(dev, &req->dst_clip_rect)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!validate_rect(dev, &req->dst_clip_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_src_img_used) {
+ struct b2r2_blt_rect src_img_bounding_rect;
+
+ if (!validate_img(dev, &req->src_img)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!validate_img(dev, &req->src_img)\n");
+ return false;
+ }
+
+ b2r2_get_img_bounding_rect(&req->src_img,
+ &src_img_bounding_rect);
+ if (!b2r2_is_rect_inside_rect(&req->src_rect,
+ &src_img_bounding_rect)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!b2r2_is_rect_inside_rect(&req->src_rect, "
+ "&src_img_bounding_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_bg_img_used) {
+ struct b2r2_blt_rect bg_img_bounding_rect;
+
+ if (!validate_img(dev, &req->bg_img)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!validate_img(dev, &req->bg_img)\n");
+ return false;
+ }
+
+ if (!is_valid_bg_format(req->bg_img.fmt)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!is_valid_bg_format(req->bg_img->fmt)\n");
+ return false;
+ }
+
+ b2r2_get_img_bounding_rect(&req->bg_img,
+ &bg_img_bounding_rect);
+ if (!b2r2_is_rect_inside_rect(&req->bg_rect,
+ &bg_img_bounding_rect)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!b2r2_is_rect_inside_rect(&req->bg_rect, "
+ "&bg_img_bounding_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_src_mask_used) {
+ struct b2r2_blt_rect src_mask_bounding_rect;
+
+ if (!validate_img(dev, &req->src_mask)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!validate_img(dev, &req->src_mask)\n");
+ return false;
+ }
+
+ b2r2_get_img_bounding_rect(&req->src_mask,
+ &src_mask_bounding_rect);
+ if (!b2r2_is_rect_inside_rect(&req->src_rect,
+ &src_mask_bounding_rect)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!b2r2_is_rect_inside_rect(&req->src_rect, "
+ "&src_mask_bounding_rect)\n");
+ return false;
+ }
+ }
+
+ if (!validate_img(dev, &req->dst_img)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!validate_img(dev, &req->dst_img)\n");
+ return false;
+ }
+
+ if (is_bg_img_used) {
+ if (!b2r2_is_rect_gte_rect(&req->bg_rect, &req->dst_rect)) {
+ b2r2_log_info(dev, "Validation Error: "
+ "!b2r2_is_rect_gte_rect(&req->bg_rect, "
+ "&req->dst_rect)\n");
+ return false;
+ }
+ }
+
+ return true;
+}
diff --git a/drivers/video/b2r2/b2r2_input_validation.h b/drivers/video/b2r2/b2r2_input_validation.h
new file mode 100644
index 00000000000..25f022a45ab
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_input_validation.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_INPUT_VALIDATION_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_INPUT_VALIDATION_H_
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_internal.h"
+
+bool b2r2_validate_user_req(struct device *dev,
+ struct b2r2_blt_req *req);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_internal.h b/drivers/video/b2r2/b2r2_internal.h
new file mode 100644
index 00000000000..329d644e5e1
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_internal.h
@@ -0,0 +1,610 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal definitions
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <video/b2r2_blt.h>
+
+#include "b2r2_global.h"
+#include "b2r2_hw.h"
+
+/**
+ * B2R2_MAX_NBR_DEVICES - The maximum number of B2R2s handled
+ */
+#define B2R2_MAX_NBR_DEVICES 2
+
+/* The maximum possible number of temporary buffers needed */
+#define MAX_TMP_BUFS_NEEDED 2
+
+/* Size of the color look-up table */
+#define CLUT_SIZE 1024
+
+/* The defined bits of the Interrupt Status Register */
+#define B2R2_ITS_MASK 0x0FFFF0FF
+
+/**
+ * b2r2_op_type - the type of B2R2 operation to configure
+ */
+enum b2r2_op_type {
+ B2R2_DIRECT_COPY,
+ B2R2_DIRECT_FILL,
+ B2R2_COPY,
+ B2R2_FILL,
+ B2R2_SCALE,
+ B2R2_ROTATE,
+ B2R2_SCALE_AND_ROTATE,
+ B2R2_FLIP,
+};
+
+/**
+ * b2r2_fmt_type - the type of buffer for a given format
+ */
+enum b2r2_fmt_type {
+ B2R2_FMT_TYPE_RASTER,
+ B2R2_FMT_TYPE_SEMI_PLANAR,
+ B2R2_FMT_TYPE_PLANAR,
+};
+
+/**
+ * b2r2_fmt_conv - the type of format conversion to do
+ */
+enum b2r2_fmt_conv {
+ B2R2_FMT_CONV_NONE,
+ B2R2_FMT_CONV_RGB_TO_YUV,
+ B2R2_FMT_CONV_YUV_TO_RGB,
+ B2R2_FMT_CONV_YUV_TO_YUV,
+ B2R2_FMT_CONV_RGB_TO_BGR,
+ B2R2_FMT_CONV_BGR_TO_RGB,
+ B2R2_FMT_CONV_YUV_TO_BGR,
+ B2R2_FMT_CONV_BGR_TO_YUV,
+};
+
+/**
+ * enum b2r2_core_queue - Indicates the B2R2 queue that the job belongs to
+ *
+ * @B2R2_CORE_QUEUE_AQ1: Application queue 1
+ * @B2R2_CORE_QUEUE_AQ2: Application queue 2
+ * @B2R2_CORE_QUEUE_AQ3: Application queue 3
+ * @B2R2_CORE_QUEUE_AQ4: Application queue 4
+ * @B2R2_CORE_QUEUE_CQ1: Composition queue 1
+ * @B2R2_CORE_QUEUE_CQ2: Composition queue 2
+ * @B2R2_CORE_QUEUE_NO_OF: Number of queues
+ */
+enum b2r2_core_queue {
+ B2R2_CORE_QUEUE_AQ1 = 0,
+ B2R2_CORE_QUEUE_AQ2,
+ B2R2_CORE_QUEUE_AQ3,
+ B2R2_CORE_QUEUE_AQ4,
+ B2R2_CORE_QUEUE_CQ1,
+ B2R2_CORE_QUEUE_CQ2,
+ B2R2_CORE_QUEUE_NO_OF,
+};
+
+#define B2R2_NUM_APPLICATIONS_QUEUES 4
+
+/**
+ * enum b2r2_core_job_state - Indicates the current state of the job
+ *
+ * @B2R2_CORE_JOB_IDLE: Never queued
+ * @B2R2_CORE_JOB_QUEUED: In queue but not started yet
+ * @B2R2_CORE_JOB_RUNNING: Running, executed by B2R2
+ * @B2R2_CORE_JOB_DONE: Completed
+ * @B2R2_CORE_JOB_CANCELED: Canceled
+ */
+enum b2r2_core_job_state {
+ B2R2_CORE_JOB_IDLE = 0,
+ B2R2_CORE_JOB_QUEUED,
+ B2R2_CORE_JOB_RUNNING,
+ B2R2_CORE_JOB_DONE,
+ B2R2_CORE_JOB_CANCELED,
+};
+
+/**
+ * b2r2_work_buf - specification for a temporary work buffer
+ *
+ * @size - the size of the buffer (set by b2r2_node_split)
+ * @phys_addr - the physical address of the buffer (set by b2r2_blt_main)
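+ * @virt_addr - the virtual address of the buffer
+ * @mem_handle - handle of the memory allocation backing the buffer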
+ */
+struct b2r2_work_buf {
+ u32 size;
+ u32 phys_addr;
+ void *virt_addr;
+ u32 mem_handle;
+};
+
+struct tmp_buf {
+ struct b2r2_work_buf buf;
+ bool in_use;
+};
+
+/**
+ * struct b2r2_control_instance - Represents the B2R2 instance
+ * (one per open and blitter core)
+ *
+ * @lock: Lock to protect the instance
+ * @control_id: The b2r2 core control identifier
+ * @control: The b2r2 core control entity
+ *
+ * @report_list: Ready requests that should be reported,
+ * @report_list_waitq: Wait queue for report list
+ * @no_of_active_requests: Number of requests added but not reported
+ * in callback.
+ * @synching: true if any client is waiting for b2r2_blt_synch(0)
+ * @synch_done_waitq: Wait queue to handle synching on request_id 0
+ */
+struct b2r2_control_instance {
+ struct mutex lock;
+ int control_id;
+ struct b2r2_control *control;
+
+ /* Requests to be reported */
+ struct list_head report_list;
+ wait_queue_head_t report_list_waitq;
+
+ /* Below for synching */
+ u32 no_of_active_requests;
+ bool synching;
+ wait_queue_head_t synch_done_waitq;
+};
+
+/**
+ * struct b2r2_node - Represents a B2R2 node with register values, executed
+ * by B2R2. Should be allocated non-cached.
+ *
+ * @next: Next node
+ * @physical_address: Physical address to be given to B2R2
+ * (physical address of "node" member below)
+ * @node: The B2R2 node with register settings. This is the data
+ * that B2R2 will use.
+ *
+ */
+struct b2r2_node {
+ struct b2r2_node *next;
+ u32 physical_address;
+
+ int src_tmp_index;
+ int dst_tmp_index;
+
+ int src_index;
+
+ /* B2R2 regs comes here */
+ struct b2r2_link_list node;
+};
+
+/**
+ * struct b2r2_resolved_buf - Contains calculated information about
+ * image buffers.
+ *
+ * @physical_address: Physical address of the buffer
+ * @virtual_address: Virtual address of the buffer
+ * @is_pmem: true if buffer is from pmem
+ * @hwmem_session: Hwmem session
+ * @hwmem_alloc: Hwmem alloc
+ * @filep: File pointer of mapped file (like pmem device, frame buffer device)
+ * @file_physical_start: Physical address of file start
+ * @file_virtual_start: Virtual address of file start
+ * @file_len: File len
+ *
+ */
+struct b2r2_resolved_buf {
+ u32 physical_address;
+ void *virtual_address;
+ bool is_pmem;
+ struct hwmem_alloc *hwmem_alloc;
+ /* Data for validation below */
+ struct file *filep;
+ u32 file_physical_start;
+ u32 file_virtual_start;
+ u32 file_len;
+};
+
+/**
+ * b2r2_node_split_buf - information about a source or destination buffer
+ *
+ * @addr - the physical base address
+ * @chroma_addr - the physical address of the chroma plane
+ * @chroma_cr_addr - the physical address of the Cr chroma plane
+ * @fmt - the buffer format
+ * @type - the buffer format type
+ * @rect - the rectangle of the buffer to use
+ * @color - the color value to use in case of a fill operation
+ * @pitch - the pixmap byte pitch
+ * @height - the pixmap height
+ * @alpha_range - the alpha range of the buffer (0-128 or 0-255)
+ * @hso - the horizontal scan order
+ * @vso - the vertical scan order
+ * @endian - the endianness of the buffer
+ * @plane_selection - the plane to write if buffer is planar or semi-planar
+ */
+struct b2r2_node_split_buf {
+ u32 addr;
+ u32 chroma_addr;
+ u32 chroma_cr_addr;
+
+ enum b2r2_blt_fmt fmt;
+ enum b2r2_fmt_type type;
+
+ struct b2r2_blt_rect rect;
+ struct b2r2_blt_rect win;
+
+ s32 dx;
+ s32 dy;
+
+ u32 color;
+ u16 pitch;
+ u16 width;
+ u16 height;
+
+ enum b2r2_ty alpha_range;
+ enum b2r2_ty hso;
+ enum b2r2_ty vso;
+ enum b2r2_ty endian;
+ enum b2r2_tty dither;
+
+ /* Plane selection (used when writing to a multibuffer format) */
+ enum b2r2_tty plane_selection;
+
+ /* Chroma plane selection (used when writing planar formats) */
+ enum b2r2_tty chroma_selection;
+
+ int tmp_buf_index;
+};
+
+/**
+ * b2r2_node_split_job - an instance of a node split job
+ *
+ * @type - the type of operation
+ * @ivmx - the ivmx matrix to use for color conversion
+ * @blend - determines if blending is enabled
+ * @clip - determines if destination clipping is enabled
+ * @rotation - determines if rotation is requested
+ * @fullrange - determines YUV<->RGB conversion matrix (iVMx)
+ * @swap_fg_bg - determines if FG and BG should be swapped when blending
+ * @flags - the flags passed in the blt request
+ * @flag_param - parameter required by certain flags,
+ * e.g. color for source color keying.
+ * @transform - the transforms passed in the blt request
+ * @global_alpha - the global alpha
+ * @clip_rect - the clipping rectangle to use
+ * @h_rescale - determines if horizontal rescaling is enabled
+ * @h_rsf - the horizontal scale factor
+ * @v_rescale - determines if vertical rescaling is enabled
+ * @v_rsf - the vertical scale factor
+ * @src - the incoming source buffer
+ * @bg - the incoming background buffer
+ * @dst - the outgoing destination buffer
+ * @work_bufs - work buffer specifications
+ * @tmp_bufs - temporary buffers
+ * @buf_count - the number of temporary buffers used for the job
+ * @node_count - the number of nodes used for the job
+ * @max_buf_size - the maximum size of temporary buffers
+ * @nbr_rows - the number of tile rows in the blit operation
+ * @nbr_cols - the number of tile columns in the blit operation
+ */
+struct b2r2_node_split_job {
+ enum b2r2_op_type type;
+
+ const u32 *ivmx;
+
+ bool blend;
+ bool clip;
+ bool rotation;
+ bool fullrange;
+
+ bool swap_fg_bg;
+
+ u32 flags;
+ u32 flag_param;
+ u32 transform;
+ u32 global_alpha;
+
+ struct b2r2_blt_rect clip_rect;
+
+ bool h_rescale;
+ u16 h_rsf;
+
+ bool v_rescale;
+ u16 v_rsf;
+
+ struct b2r2_node_split_buf src;
+ struct b2r2_node_split_buf bg;
+ struct b2r2_node_split_buf dst;
+
+ struct b2r2_work_buf work_bufs[MAX_TMP_BUFS_NEEDED];
+ struct b2r2_node_split_buf tmp_bufs[MAX_TMP_BUFS_NEEDED];
+
+ u32 buf_count;
+ u32 node_count;
+ u32 max_buf_size;
+};
+
+/**
+ * struct b2r2_core_job - Represents a B2R2 core job
+ *
+ * @start_sentinel: Memory overwrite guard
+ *
+ * @tag: Client value. Used by b2r2_core_job_find_first_with_tag().
+ * @prio: Job priority, from -19 up to 20. Mapped to the
+ * B2R2 application queues. Filled in by the client.
+ * @first_node_address: Physical address of the first node. Filled
+ * in by the client.
+ * @last_node_address: Physical address of the last node. Filled
+ * in by the client.
+ *
+ * @callback: Function that will be called when the job is done.
+ * @acquire_resources: Function that allocates the resources needed
+ * to execute the job (i.e. SRAM alloc). Must not
+ * sleep if atomic, should fail with negative error code
+ * if resources not available.
+ * @release_resources: Function that releases the resources previously
+ * allocated by acquire_resources (i.e. SRAM alloc).
+ * @release: Function that will be called when the reference count reaches
+ * zero.
+ *
+ * @job_id: Unique id for this job, assigned by B2R2 core
+ * @job_state: The current state of the job
+ * @jiffies: Number of jiffies needed for this request
+ *
+ * @list: List entry element for internal list management
+ * @event: Wait queue event to wait for job done
+ * @work: Work queue structure, for callback implementation
+ *
+ * @queue: The queue that this job shall be submitted to
+ * @control: B2R2 Queue control
+ * @pace_control: For composition queue only
+ * @interrupt_context: Context for interrupt
+ * @hw_start_time: The point when the b2r2 HW queue is activated for this job
+ * @nsec_active_in_hw: Time spent on the b2r2 HW queue for this job
+ *
+ * @end_sentinel: Memory overwrite guard
+ */
+struct b2r2_core_job {
+ u32 start_sentinel;
+
+ /* Data to be filled in by client */
+ int tag;
+ int data;
+ int prio;
+ u32 first_node_address;
+ u32 last_node_address;
+ void (*callback)(struct b2r2_core_job *);
+ int (*acquire_resources)(struct b2r2_core_job *,
+ bool atomic);
+ void (*release_resources)(struct b2r2_core_job *,
+ bool atomic);
+ void (*release)(struct b2r2_core_job *);
+
+ /* Output data, do not modify */
+ int job_id;
+ enum b2r2_core_job_state job_state;
+ unsigned long jiffies;
+
+ /* Data below is internal to b2r2_core, do not modify */
+
+ /* Reference counting */
+ u32 ref_count;
+
+ /* Internal data */
+ struct list_head list;
+ wait_queue_head_t event;
+ struct work_struct work;
+
+ /* B2R2 HW data */
+ enum b2r2_core_queue queue;
+ u32 control;
+ u32 pace_control;
+ u32 interrupt_context;
+
+ /* Timing data */
+ u32 hw_start_time;
+ s32 nsec_active_in_hw;
+
+ u32 end_sentinel;
+};
+
+/**
+ * struct b2r2_blt_request - Represents one B2R2 blit request
+ *
+ * @instance: Back pointer to the instance structure
+ * @list: List item to keep track of requests per instance
+ * @user_req: The request received from userspace
+ * @job: The administration structure for the B2R2 job,
+ * consisting of one or more nodes
+ * @node_split_job: The administration structure for the B2R2 node split job
+ * @first_node: Pointer to the first B2R2 node
+ * @request_id: Request id for this job
+ * @core_mask: Bit mask with the cores doing part of the job
+ * @node_split_handle: Handle of the node split
+ * @src_resolved: Calculated info about the source buffer
+ * @src_mask_resolved: Calculated info about the source mask buffer
+ * @bg_resolved: Calculated info about the background buffer
+ * @dst_resolved: Calculated info about the destination buffer
+ * @profile: True if the blit shall be profiled, false otherwise
+ */
+struct b2r2_blt_request {
+ struct b2r2_control_instance *instance;
+ struct list_head list;
+ struct b2r2_blt_req user_req;
+ struct b2r2_core_job job;
+ struct b2r2_node_split_job node_split_job;
+ struct b2r2_node *first_node;
+ int request_id;
+ u32 core_mask;
+
+ /* Resolved buffer addresses */
+ struct b2r2_resolved_buf src_resolved;
+ struct b2r2_resolved_buf src_mask_resolved;
+ struct b2r2_resolved_buf bg_resolved;
+ struct b2r2_resolved_buf dst_resolved;
+
+ /* TBD: Info about SRAM usage & needs */
+ struct b2r2_work_buf *bufs;
+ u32 buf_count;
+
+ /* color look-up table */
+ void *clut;
+ u32 clut_phys_addr;
+
+ /* Profiling stuff */
+ bool profile;
+
+ s32 nsec_active_in_cpu;
+
+ u32 start_time_nsec;
+ s32 total_time_nsec;
+};
+
+/**
+ * struct b2r2_mem_heap - The memory heap
+ *
+ * @start_phys_addr: Physical memory start address
+ * @start_virt_ptr: Virtual pointer to start
+ * @size: Memory size
+ * @align: Alignment
+ * @blocks: List of all blocks
+ * @heap_lock: Protection for the heap
+ * @node_size: Size of each B2R2 node
+ * @node_heap: Heap for B2R2 node allocations
+ * @debugfs_root_dir: Debugfs B2R2 mem root dir
+ * @debugfs_heap_stats: Debugfs B2R2 memory status
+ * @debugfs_dir_blocks: Debugfs B2R2 free blocks dir
+ */
+struct b2r2_mem_heap {
+ dma_addr_t start_phys_addr;
+ void *start_virt_ptr;
+ u32 size;
+ u32 align;
+ struct list_head blocks;
+ spinlock_t heap_lock;
+ u32 node_size;
+ struct dma_pool *node_heap;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_heap_stats;
+ struct dentry *debugfs_dir_blocks;
+#endif
+};
+
+/**
+ * struct b2r2_control - Administration data for one b2r2 instance
+ *
+ * @dev: The device handle of the b2r2 instance
+ * @id: The id of the b2r2 instance
+ * @name: The name of the b2r2 instance
+ * @data: Used to store a reference to b2r2_core
+ * @tmp_bufs: Temporary buffers needed in the node splitter
+ * @filters_initialized: Indicates whether filters have been
+ * initialized for this b2r2 instance
+ * @mem_heap: The b2r2 heap, e.g. used to allocate nodes
+ * @debugfs_latest_request: Copy of the latest request issued
+ * @debugfs_root_dir: The debugfs root directory, e.g. /debugfs/b2r2
+ * @debugfs_debug_root_dir: The b2r2 debug root directory,
+ * e.g. /debugfs/b2r2/debug
+ * @stat_lock: Spin lock protecting the statistics
+ * @stat_n_jobs_added: Number of jobs added to b2r2_core
+ * @stat_n_jobs_released: Number of jobs released (job_release called)
+ * @stat_n_jobs_in_report_list: Number of jobs currently in the report list
+ * @stat_n_in_blt: Number of client threads currently executing inside
+ * b2r2_blt()
+ * @stat_n_in_blt_synch: Number of client threads currently waiting for synch
+ * @stat_n_in_blt_add: Number of client threads currently adding in b2r2_blt
+ * @stat_n_in_blt_wait: Number of client threads currently waiting in b2r2_blt
+ * @stat_n_in_synch_0: Number of client threads currently in b2r2_blt_sync
+ * waiting for all client jobs to finish
+ * @stat_n_in_synch_job: Number of client threads currently in b2r2_blt_sync
+ * waiting for a specific job to finish
+ * @stat_n_in_query_cap: Number of clients currently in query cap
+ * @stat_n_in_open: Number of clients currently in b2r2_blt_open
+ * @stat_n_in_release: Number of clients currently in b2r2_blt_release
+ * @last_job_lock: Mutex protecting last_job
+ * @last_job: The last running job on this b2r2 instance
+ * @last_job_chars: Temporary buffer used in printing last_job
+ * @prev_node_count: Node count of last_job
+ */
+struct b2r2_control {
+ struct device *dev;
+ void *data;
+ int id;
+ struct kref ref;
+ bool enabled;
+ struct tmp_buf tmp_bufs[MAX_TMP_BUFS_NEEDED];
+ int filters_initialized;
+ struct b2r2_mem_heap mem_heap;
+#ifdef CONFIG_DEBUG_FS
+ struct b2r2_blt_request debugfs_latest_request;
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_debug_root_dir;
+#endif
+ struct mutex stat_lock;
+ unsigned long stat_n_jobs_added;
+ unsigned long stat_n_jobs_released;
+ unsigned long stat_n_jobs_in_report_list;
+ unsigned long stat_n_in_blt;
+ unsigned long stat_n_in_blt_synch;
+ unsigned long stat_n_in_blt_add;
+ unsigned long stat_n_in_blt_wait;
+ unsigned long stat_n_in_synch_0;
+ unsigned long stat_n_in_synch_job;
+ unsigned long stat_n_in_query_cap;
+ unsigned long stat_n_in_open;
+ unsigned long stat_n_in_release;
+ struct mutex last_job_lock;
+ struct b2r2_node *last_job;
+ char *last_job_chars;
+ int prev_node_count;
+};
+
+/* FIXME: The functions below should be removed when we switch
+ to the new Robert Lind allocator */
+
+/**
+ * b2r2_blt_alloc_nodes() - Allocate nodes
+ *
+ * @node_count: Number of nodes to allocate
+ *
+ * Return:
+ * Returns a pointer to the first node in the node list.
+ */
+struct b2r2_node *b2r2_blt_alloc_nodes(struct b2r2_control *cont,
+ int node_count);
+
+/**
+ * b2r2_blt_free_nodes() - Release nodes previously allocated via
+ * b2r2_blt_alloc_nodes
+ *
+ * @first_node: First node in linked list of nodes
+ */
+void b2r2_blt_free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node);
+
+/**
+ * b2r2_blt_module_init() - Initialize the B2R2 blt module
+ */
+int b2r2_blt_module_init(struct b2r2_control *cont);
+
+/**
+ * b2r2_blt_module_exit() - Un-initialize the B2R2 blt module
+ */
+void b2r2_blt_module_exit(struct b2r2_control *cont);
+
+/**
+ * b2r2_blt_add_control() - Add the b2r2 core control
+ */
+void b2r2_blt_add_control(struct b2r2_control *cont);
+
+/**
+ * b2r2_blt_remove_control() - Remove the b2r2 core control
+ */
+void b2r2_blt_remove_control(struct b2r2_control *cont);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_kernel_if.c b/drivers/video/b2r2/b2r2_kernel_if.c
new file mode 100644
index 00000000000..373311ccca5
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_kernel_if.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 kernel interface for being a separate module
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
+#include <linux/fb.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+
+EXPORT_SYMBOL(fget_light);
+EXPORT_SYMBOL(fput_light);
+EXPORT_SYMBOL(flush_cache_range);
+EXPORT_SYMBOL(task_sched_runtime);
+#ifdef CONFIG_ANDROID_PMEM
+EXPORT_SYMBOL(get_pmem_file);
+EXPORT_SYMBOL(put_pmem_file);
+EXPORT_SYMBOL(flush_pmem_file);
+#endif
diff --git a/drivers/video/b2r2/b2r2_mem_alloc.c b/drivers/video/b2r2/b2r2_mem_alloc.c
new file mode 100644
index 00000000000..584b324b8fe
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_mem_alloc.c
@@ -0,0 +1,669 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal Memory allocator
+ *
+ * Author: Robert Lind <robert.lind@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_mem_alloc.h"
+
+/* Forward declarations */
+static struct b2r2_mem_block *b2r2_mem_block_alloc(
+ struct b2r2_control *cont, u32 offset, u32 size, bool free);
+static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block);
+static int b2r2_mem_heap_status(struct b2r2_mem_heap *mem_heap,
+ struct b2r2_mem_heap_status *mem_heap_status);
+
+/* Align value down to specified alignment */
+static inline u32 align_down(u32 align, u32 value)
+{
+ return value & ~(align - 1);
+}
+
+/* Align value up to specified alignment */
+static inline u32 align_up(u32 align, u32 value)
+{
+ return (value + align - 1) & ~(align - 1);
+}
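+
+/* Example: align_down(4, 10) == 8 and align_up(4, 10) == 12
+ * (align must be a power of two). */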
+
+
+#ifdef CONFIG_DEBUG_FS
+/* About debugfs:
+ * debugfs is a mountable debug file system.
+ *
+ * Mount like this:
+ * mkdir /debug
+ * mount -t debugfs none /debug
+ * ls /debug/b2r2/mem
+ *
+ * ls -al /debug/b2r2/mem/blocks
+ * cat /debug/b2r2/mem/stats
+ */
+
+
+/* Create string containing memory heap status */
+static char *get_b2r2_mem_stats(struct b2r2_mem_heap *mem_heap, char *buf)
+{
+ struct b2r2_mem_heap_status mem_heap_status;
+
+ if (b2r2_mem_heap_status(mem_heap, &mem_heap_status) != 0) {
+ strcpy(buf, "Error, failed to get status\n");
+ return buf;
+ }
+
+ sprintf(buf,
+ "Handle : 0x%lX\n"
+ "Physical start address : 0x%lX\n"
+ "Size : %lu\n"
+ "Align : %lu\n"
+ "No of blocks allocated : %lu\n"
+ "Allocated size : %lu\n"
+ "No of free blocks : %lu\n"
+ "Free size : %lu\n"
+ "No of locks : %lu\n"
+ "No of locked : %lu\n"
+ "No of nodes : %lu\n",
+ (unsigned long) mem_heap,
+ (unsigned long) mem_heap_status.start_phys_addr,
+ (unsigned long) mem_heap_status.size,
+ (unsigned long) mem_heap_status.align,
+ (unsigned long) mem_heap_status.num_alloc,
+ (unsigned long) mem_heap_status.allocated_size,
+ (unsigned long) mem_heap_status.num_free,
+ (unsigned long) mem_heap_status.free_size,
+ (unsigned long) mem_heap_status.num_locks,
+ (unsigned long) mem_heap_status.num_locked,
+ (unsigned long) mem_heap_status.num_nodes);
+
+ return buf;
+}
+
+/*
+ * Print memory heap status on file
+ * (Use like "cat /debug/b2r2/mem/stats")
+ */
+static int debugfs_b2r2_mem_stats_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct b2r2_mem_heap *mem_heap = filp->f_dentry->d_inode->i_private;
+ char Buf[400];
+ size_t dev_size;
+ int ret = 0;
+
+ get_b2r2_mem_stats(mem_heap, Buf);
+ dev_size = strlen(Buf);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ return ret;
+}
+
+/* debugfs file operations for the "stats" file */
+static const struct file_operations debugfs_b2r2_mem_stats_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_mem_stats_read,
+};
+
+/* read function for file in the "blocks" sub directory */
+static int debugfs_b2r2_mem_block_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct b2r2_mem_block *mem_block = filp->f_dentry->d_inode->i_private;
+ char Buf[200];
+ size_t dev_size;
+ int ret = 0;
+
+ dev_size = sprintf(Buf, "offset: %08lX %s size: %8d "
+ "lock_count: %2d\n",
+ (unsigned long) mem_block->offset,
+ mem_block->free ? "free" : "allc",
+ mem_block->size,
+ mem_block->lock_count);
+
+ /* Nothing more to read if the offset is past the end */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ return ret;
+}
+
+/* debugfs file operations for files in the "blocks" directory */
+static const struct file_operations debugfs_b2r2_mem_block_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_mem_block_read,
+};
+
+/*
+ * Create or update the debugfs directory entry for a file in the
+ * "blocks" directory (a memory allocation)
+ */
+void debugfs_create_mem_block_entry(struct b2r2_mem_block *mem_block,
+ struct dentry *parent)
+{
+ struct timespec tm = current_kernel_time();
+ struct timespec atime = tm;
+ struct timespec mtime = tm;
+ struct timespec ctime = tm;
+
+ if (!IS_ERR_OR_NULL(mem_block->debugfs_block)) {
+ atime = mem_block->debugfs_block->d_inode->i_atime;
+ ctime = mem_block->debugfs_block->d_inode->i_ctime;
+ debugfs_remove(mem_block->debugfs_block);
+ mem_block->debugfs_block = NULL;
+ }
+
+ /* Add the block in debugfs */
+ if (mem_block->free)
+ sprintf(mem_block->debugfs_fname, "%08lX free",
+ (unsigned long) mem_block->offset);
+ else {
+ sprintf(mem_block->debugfs_fname, "%08lX allc h:%08lX "
+ "lck:%d ",
+ (unsigned long) mem_block->offset,
+ (unsigned long) mem_block,
+ mem_block->lock_count);
+ }
+
+ mem_block->debugfs_block = debugfs_create_file(
+ mem_block->debugfs_fname,
+ 0444, parent, mem_block,
+ &debugfs_b2r2_mem_block_fops);
+ if (!IS_ERR_OR_NULL(mem_block->debugfs_block)) {
+ mem_block->debugfs_block->d_inode->i_size = mem_block->size;
+ mem_block->debugfs_block->d_inode->i_atime = atime;
+ mem_block->debugfs_block->d_inode->i_mtime = mtime;
+ mem_block->debugfs_block->d_inode->i_ctime = ctime;
+ }
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/* Module initialization function */
+int b2r2_mem_init(struct b2r2_control *cont,
+ u32 heap_size, u32 align, u32 node_size)
+{
+ struct b2r2_mem_block *mem_block;
+ u32 aligned_size;
+
+ dev_info(cont->dev, "%s: Creating heap for size %d bytes\n",
+ __func__, (int) heap_size);
+
+ /* Align size */
+ aligned_size = align_down(align, heap_size);
+ if (aligned_size == 0)
+ return -EINVAL;
+
+ cont->mem_heap.start_virt_ptr = dma_alloc_coherent(cont->dev,
+ aligned_size, &(cont->mem_heap.start_phys_addr), GFP_KERNEL);
+ if (!cont->mem_heap.start_phys_addr || !cont->mem_heap.start_virt_ptr) {
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize the heap */
+ cont->mem_heap.size = aligned_size;
+ cont->mem_heap.align = align;
+
+ INIT_LIST_HEAD(&cont->mem_heap.blocks);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debugfs */
+ if (!IS_ERR_OR_NULL(cont->mem_heap.debugfs_root_dir)) {
+ cont->mem_heap.debugfs_heap_stats = debugfs_create_file(
+ "stats", 0444, cont->mem_heap.debugfs_root_dir,
+ &cont->mem_heap, &debugfs_b2r2_mem_stats_fops);
+ cont->mem_heap.debugfs_dir_blocks = debugfs_create_dir(
+ "blocks", cont->mem_heap.debugfs_root_dir);
+ }
+#endif
+
+ /* Create the first _free_ memory block */
+ mem_block = b2r2_mem_block_alloc(cont, 0, aligned_size, true);
+ if (!mem_block) {
+ dma_free_coherent(cont->dev, aligned_size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Add the free block to the blocks list */
+ list_add(&mem_block->list, &cont->mem_heap.blocks);
+
+ /* Allocate separate heap for B2R2 nodes */
+ cont->mem_heap.node_size = node_size;
+ cont->mem_heap.node_heap = dma_pool_create("b2r2_node_cache",
+ cont->dev, node_size, align, 4096);
+ if (!cont->mem_heap.node_heap) {
+ b2r2_mem_block_free(mem_block);
+ dma_free_coherent(cont->dev, aligned_size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(b2r2_mem_init);
+
+/* Module exit function */
+void b2r2_mem_exit(struct b2r2_control *cont)
+{
+ struct list_head *ptr;
+
+ /* Free B2R2 node heap */
+ dma_pool_destroy(cont->mem_heap.node_heap);
+
+ list_for_each(ptr, &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ b2r2_mem_block_free(mem_block);
+ }
+
+ dma_free_coherent(cont->dev, cont->mem_heap.size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
+}
+EXPORT_SYMBOL(b2r2_mem_exit);
+
+/* Return status of the heap */
+static int b2r2_mem_heap_status(struct b2r2_mem_heap *mheap,
+ struct b2r2_mem_heap_status *mem_heap_status)
+{
+ struct list_head *ptr;
+
+ if (!mheap || !mem_heap_status)
+ return -EINVAL;
+ memset(mem_heap_status, 0, sizeof(*mem_heap_status));
+
+ /* Lock the heap */
+ spin_lock(&mheap->heap_lock);
+
+ /* Fill in static info */
+ mem_heap_status->start_phys_addr = mheap->start_phys_addr;
+ mem_heap_status->size = mheap->size;
+ mem_heap_status->align = mheap->align;
+
+ list_for_each(ptr, &mheap->blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ if (mem_block->free) {
+ mem_heap_status->num_free++;
+ mem_heap_status->free_size += mem_block->size;
+ } else {
+ if (mem_block->lock_count) {
+ mem_heap_status->num_locked++;
+ mem_heap_status->num_locks +=
+ mem_block->lock_count;
+ }
+ mem_heap_status->num_alloc++;
+ mem_heap_status->allocated_size += mem_block->size;
+ }
+ }
+
+ spin_unlock(&mheap->heap_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(b2r2_mem_heap_status);
+
+/* Internal: Allocate a housekeeping structure
+ * for an allocated or free memory block
+ */
+static struct b2r2_mem_block *b2r2_mem_block_alloc(
+ struct b2r2_control *cont, u32 offset, u32 size, bool free)
+{
+ struct b2r2_mem_block *mem_block = kmalloc(
+ sizeof(struct b2r2_mem_block), GFP_KERNEL);
+
+ if (mem_block) {
+ mem_block->offset = offset;
+ mem_block->size = size;
+ mem_block->free = free;
+ mem_block->lock_count = 0;
+
+ INIT_LIST_HEAD(&mem_block->list);
+
+#ifdef CONFIG_DEBUG_FS
+ mem_block->debugfs_block = NULL;
+ /* Add the block in debugfs */
+ debugfs_create_mem_block_entry(mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+ }
+
+ return mem_block;
+}
+
+/* Internal: Release housekeeping structure */
+static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block)
+{
+ if (mem_block) {
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(mem_block->debugfs_block);
+#endif
+ kfree(mem_block);
+ }
+}
+
+/* Allocate a block from the heap */
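+/* A best-fit strategy: the smallest free block that is large enough
+ * is chosen and split; the search stops early on an exact fit. */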
+int b2r2_mem_alloc(struct b2r2_control *cont, u32 requested_size,
+ u32 *returned_size, u32 *mem_handle)
+{
+ int ret = 0;
+ struct list_head *ptr;
+ struct b2r2_mem_block *found_mem_block = NULL;
+ u32 aligned_size;
+
+ if (!mem_handle)
+ return -EINVAL;
+
+ printk(KERN_INFO "%s: size=%d\n", __func__, requested_size);
+
+ *mem_handle = 0;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ aligned_size = align_up(cont->mem_heap.align, requested_size);
+ /* Try to find the best matching free block of suitable size */
+ list_for_each(ptr, &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ if (mem_block->free && mem_block->size >= aligned_size &&
+ (!found_mem_block ||
+ mem_block->size < found_mem_block->size)) {
+ found_mem_block = mem_block;
+ if (found_mem_block->size == aligned_size)
+ break;
+ }
+ }
+
+ if (found_mem_block) {
+ struct b2r2_mem_block *new_block
+ = b2r2_mem_block_alloc(cont,
+ found_mem_block->offset,
+ requested_size, false);
+
+ if (new_block) {
+ /* Insert the new block before the found block */
+ list_add_tail(&new_block->list,
+ &found_mem_block->list);
+
+ /* Split the free block */
+ found_mem_block->offset += aligned_size;
+ found_mem_block->size -= aligned_size;
+
+ if (found_mem_block->size == 0)
+ b2r2_mem_block_free(found_mem_block);
+ else {
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(
+ found_mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+ }
+
+ *mem_handle = (u32) new_block;
+ *returned_size = aligned_size;
+ } else {
+ ret = -ENOMEM;
+ }
+ } else
+ ret = -ENOMEM;
+
+ if (ret != 0) {
+ *returned_size = 0;
+ *mem_handle = (u32) 0;
+ }
+
+ /* Unlock */
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_mem_alloc);
+
+/* Free the allocated block */
+int b2r2_mem_free(struct b2r2_control *cont, u32 mem_handle)
+{
+ int ret = 0;
+ struct b2r2_mem_block *mem_block = (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ if (!ret && mem_block->free)
+ ret = -EINVAL;
+
+ if (!ret) {
+ printk(KERN_INFO "%s: freeing block 0x%p\n", __func__, mem_block);
+ /* Release the block */
+
+ mem_block->free = true;
+ mem_block->size = align_up(cont->mem_heap.align,
+ mem_block->size);
+
+ /* Join with previous block if possible */
+ if (mem_block->list.prev != &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *prev_block =
+ list_entry(mem_block->list.prev,
+ struct b2r2_mem_block, list);
+
+ if (prev_block->free &&
+ (prev_block->offset + prev_block->size) ==
+ mem_block->offset) {
+ mem_block->offset = prev_block->offset;
+ mem_block->size += prev_block->size;
+
+ b2r2_mem_block_free(prev_block);
+ }
+ }
+
+ /* Join with next block if possible */
+ if (mem_block->list.next != &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *next_block
+ = list_entry(mem_block->list.next,
+ struct b2r2_mem_block,
+ list);
+
+ if (next_block->free &&
+ (mem_block->offset + mem_block->size) ==
+ next_block->offset) {
+ mem_block->size += next_block->size;
+
+ b2r2_mem_block_free(next_block);
+ }
+ }
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+ }
+
+ /* Unlock */
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_mem_free);
+
+/* Lock the allocated block in memory */
+int b2r2_mem_lock(struct b2r2_control *cont, u32 mem_handle,
+ u32 *phys_addr, void **virt_ptr, u32 *size)
+{
+ struct b2r2_mem_block *mem_block =
+ (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ mem_block->lock_count++;
+
+ if (phys_addr)
+ *phys_addr = cont->mem_heap.start_phys_addr + mem_block->offset;
+ if (virt_ptr)
+ *virt_ptr = (char *) cont->mem_heap.start_virt_ptr +
+ mem_block->offset;
+ if (size)
+ *size = align_up(cont->mem_heap.align, mem_block->size);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(b2r2_mem_lock);
+
+/* Unlock the allocated block in memory */
+int b2r2_mem_unlock(struct b2r2_control *cont, u32 mem_handle)
+{
+ struct b2r2_mem_block *mem_block =
+ (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ mem_block->lock_count--;
+
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ /* debugfs will be updated in release */
+ return 0;
+/* return b2r2_mem_free(mem_handle);*/
+}
+EXPORT_SYMBOL(b2r2_mem_unlock);
+
+/* Allocate one or more b2r2 nodes from DMA pool */
+int b2r2_node_alloc(struct b2r2_control *cont, u32 num_nodes,
+ struct b2r2_node **first_node)
+{
+ int i;
+ int ret = 0;
+ u32 physical_address;
+ struct b2r2_node *first_node_ptr;
+ struct b2r2_node *node_ptr;
+
+ /* Check input parameters */
+ if ((num_nodes <= 0) || !first_node) {
+ dev_err(cont->dev,
+ "B2R2_MEM: Invalid parameter for b2r2_node_alloc, "
+ "num_nodes=%d, first_node=%ld\n",
+ (int) num_nodes, (long) first_node);
+ return -EINVAL;
+ }
+
+ /* Allocate the first node */
+ first_node_ptr = dma_pool_alloc(cont->mem_heap.node_heap,
+ GFP_DMA | GFP_KERNEL, &physical_address);
+ if (!first_node_ptr) {
+ dev_err(cont->dev,
+ "B2R2_MEM: Failed to allocate memory for node\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize first node */
+ first_node_ptr->next = NULL;
+ first_node_ptr->physical_address = physical_address +
+ offsetof(struct b2r2_node, node);
+
+ /* Allocate and initialize remaining nodes, */
+ /* and link them into a list */
+ for (i = 1, node_ptr = first_node_ptr; i < num_nodes; i++) {
+ node_ptr->next = dma_pool_alloc(cont->mem_heap.node_heap,
+ GFP_DMA | GFP_KERNEL, &physical_address);
+ if (node_ptr->next) {
+ node_ptr = node_ptr->next;
+ node_ptr->next = NULL;
+ node_ptr->physical_address = physical_address +
+ offsetof(struct b2r2_node, node);
+ } else {
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory for node\n");
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ /* If all nodes were allocated successfully, */
+ /* return the first node */
+ if (!ret)
+ *first_node = first_node_ptr;
+ else
+ b2r2_node_free(cont, first_node_ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_node_alloc);
+
+/* Free a linked list of b2r2 nodes */
+void b2r2_node_free(struct b2r2_control *cont, struct b2r2_node *first_node)
+{
+ struct b2r2_node *current_node = first_node;
+ struct b2r2_node *next_node = NULL;
+
+ /* Traverse the linked list and free the nodes */
+ while (current_node != NULL) {
+ next_node = current_node->next;
+ dma_pool_free(cont->mem_heap.node_heap, current_node,
+ current_node->physical_address -
+ offsetof(struct b2r2_node, node));
+ current_node = next_node;
+ }
+}
+EXPORT_SYMBOL(b2r2_node_free);
+
+MODULE_AUTHOR("Robert Lind <robert.lind@ericsson.com");
+MODULE_DESCRIPTION("Ericsson AB B2R2 physical memory driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/b2r2/b2r2_mem_alloc.h b/drivers/video/b2r2/b2r2_mem_alloc.h
new file mode 100644
index 00000000000..4fd1e66abca
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_mem_alloc.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal Memory allocator
+ *
+ * Author: Robert Lind <robert.lind@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_MEM_ALLOC_H
+#define __B2R2_MEM_ALLOC_H
+
+#include "b2r2_internal.h"
+
+
+/**
+ * struct b2r2_mem_heap_status - Information about current state of the heap
+ *
+ * @start_phys_addr: Physical address of the memory area
+ * @size: Size of the memory area
+ * @align: Alignment of start and allocation sizes (in bytes).
+ * @num_alloc: Number of memory allocations
+ * @allocated_size: Size allocated (sum of requested sizes)
+ * @num_free: Number of free blocks (fragments)
+ * @free_size: Free size available for allocation
+ * @num_locks: Total number of locks held on memory allocations
+ * @num_locked: Number of locked memory allocations
+ * @num_nodes: Number of node allocations
+ *
+ **/
+struct b2r2_mem_heap_status {
+ u32 start_phys_addr;
+ u32 size;
+ u32 align;
+ u32 num_alloc;
+ u32 allocated_size;
+ u32 num_free;
+ u32 free_size;
+ u32 num_locks;
+ u32 num_locked;
+ u32 num_nodes;
+};
+
+/**
+ * struct b2r2_mem_block - Represents one block of b2r2
+ * physical memory, free or allocated
+ *
+ * @list: For membership in list
+ * @offset: Offset in b2r2 physical memory area (aligned)
+ * @size: Size of the object (requested size if busy, else actual)
+ * @free: True if the block is free
+ * @lock_count: Lock count
+ * @debugfs_fname: Debugfs file name
+ * @debugfs_block: Debugfs dir entry for the block
+ */
+struct b2r2_mem_block {
+ struct list_head list;
+ u32 offset;
+ u32 size;
+ bool free;
+ u32 lock_count;
+#ifdef CONFIG_DEBUG_FS
+ char debugfs_fname[80];
+ struct dentry *debugfs_block;
+#endif
+};
+
+
+/* B2R2 memory API (kernel) */
+
+/**
+ * b2r2_mem_init() - Initializes the B2R2 memory manager
+ * @dev: Pointer to device to use for allocating the memory heap
+ * @heap_size: Size of the heap (in bytes)
+ * @align: Alignment to use for memory allocations on heap (in bytes)
+ * @node_size: Size of each B2R2 node (in bytes)
+ *
+ * Returns 0 if success, else negative error code
+ **/
+int b2r2_mem_init(struct b2r2_control *cont,
+ u32 heap_size, u32 align, u32 node_size);
+
+/**
+ * b2r2_mem_exit() - Cleans up the B2R2 memory manager
+ *
+ **/
+void b2r2_mem_exit(struct b2r2_control *cont);
+
+/**
+ * b2r2_mem_alloc() - Allocates memory block from physical memory heap
+ * @requested_size: Requested size
+ * @returned_size: Actual size of memory block. Might be adjusted due to
+ * alignment but is always >= requested size if function
+ * succeeds
+ * @mem_handle: Returned memory handle
+ *
+ * All memory allocations are movable when not locked.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_alloc(struct b2r2_control *cont, u32 requested_size,
+ u32 *returned_size, u32 *mem_handle);
+
+/**
+ * b2r2_mem_free() - Frees an allocation
+ * @mem_handle: Memory handle
+ *
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_free(struct b2r2_control *cont, u32 mem_handle);
+
+/**
+ * b2r2_mem_lock() - Lock an allocation in memory and return its physical
+ * address
+ * @mem_handle: Memory handle
+ * @phys_addr: Returned physical address to start of memory allocation.
+ * May be NULL.
+ * @virt_ptr: Returned virtual address pointer to start of memory allocation.
+ * May be NULL.
+ * @size: Returned size of memory allocation. May be NULL.
+ *
+ * The address of the memory allocation is locked and the physical address
+ * is returned.
+ * The lock count is incremented by one.
+ * You need to call b2r2_mem_unlock once for each call to
+ * b2r2_mem_lock.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_lock(struct b2r2_control *cont, u32 mem_handle,
+ u32 *phys_addr, void **virt_ptr, u32 *size);
+
+/**
+ * b2r2_mem_unlock() - Unlock previously locked memory
+ * @mem_handle: Memory handle
+ *
+ * Decrements lock count. When lock count reaches 0 the
+ * memory area is movable again.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_unlock(struct b2r2_control *cont, u32 mem_handle);
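+
+/*
+ * A typical call sequence (sketch):
+ *
+ *	u32 size, handle, phys;
+ *	if (!b2r2_mem_alloc(cont, 1024, &size, &handle)) {
+ *		b2r2_mem_lock(cont, handle, &phys, NULL, NULL);
+ *		... let B2R2 use the buffer at phys ...
+ *		b2r2_mem_unlock(cont, handle);
+ *		b2r2_mem_free(cont, handle);
+ *	}
+ */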
+
+/**
+ * b2r2_node_alloc() - Allocates B2R2 node from physical memory heap
+ * @num_nodes: Number of linked nodes to allocate
+ * @first_node: Returned pointer to first node in linked list
+ *
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_node_alloc(struct b2r2_control *cont, u32 num_nodes,
+ struct b2r2_node **first_node);
+
+/**
+ * b2r2_node_free() - Frees a linked list of allocated B2R2 nodes
+ * @first_node: Pointer to first node in linked list
+ **/
+void b2r2_node_free(struct b2r2_control *cont, struct b2r2_node *first_node);
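+
+/*
+ * Typical node usage (sketch):
+ *
+ *	struct b2r2_node *first;
+ *	if (!b2r2_node_alloc(cont, 3, &first)) {
+ *		... fill in the node register values ...
+ *		b2r2_node_free(cont, first);
+ *	}
+ */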
+
+
+#endif /* __B2R2_MEM_ALLOC_H */
diff --git a/drivers/video/b2r2/b2r2_node_gen.c b/drivers/video/b2r2/b2r2_node_gen.c
new file mode 100644
index 00000000000..1f48bac6fe7
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_gen.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node generator
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <asm/dma-mapping.h>
+#include "b2r2_internal.h"
+
+static void free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
+{
+ struct b2r2_node *node = first_node;
+ int no_of_nodes = 0;
+
+ while (node) {
+ no_of_nodes++;
+ node = node->next;
+ }
+
+ dma_free_coherent(cont->dev,
+ no_of_nodes * sizeof(struct b2r2_node),
+ first_node,
+ first_node->physical_address -
+ offsetof(struct b2r2_node, node));
+}
+
+struct b2r2_node *b2r2_blt_alloc_nodes(struct b2r2_control *cont,
+ int no_of_nodes)
+{
+ u32 physical_address;
+ struct b2r2_node *nodes;
+ struct b2r2_node *tmpnode;
+
+ if (no_of_nodes <= 0) {
+ dev_err(cont->dev, "%s: Wrong number of nodes (%d)",
+ __func__, no_of_nodes);
+ return NULL;
+ }
+
+ /* Allocate the memory */
+ nodes = (struct b2r2_node *) dma_alloc_coherent(cont->dev,
+ no_of_nodes * sizeof(struct b2r2_node),
+ &physical_address, GFP_DMA | GFP_KERNEL);
+
+ if (nodes == NULL) {
+ dev_err(cont->dev,
+ "%s: Failed to alloc memory for nodes",
+ __func__);
+ return NULL;
+ }
+
+ /* Build the linked list */
+ tmpnode = nodes;
+ physical_address += offsetof(struct b2r2_node, node);
+ while (no_of_nodes--) {
+ tmpnode->physical_address = physical_address;
+ if (no_of_nodes)
+ tmpnode->next = tmpnode + 1;
+ else
+ tmpnode->next = NULL;
+
+ tmpnode++;
+ physical_address += sizeof(struct b2r2_node);
+ }
+
+ return nodes;
+}
+
+void b2r2_blt_free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
+{
+ free_nodes(cont, first_node);
+}
+
diff --git a/drivers/video/b2r2/b2r2_node_split.c b/drivers/video/b2r2/b2r2_node_split.c
new file mode 100644
index 00000000000..b2fb07580ca
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_split.c
@@ -0,0 +1,3033 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node splitter
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "b2r2_debug.h"
+#include "b2r2_node_split.h"
+#include "b2r2_internal.h"
+#include "b2r2_hw_convert.h"
+#include "b2r2_filters.h"
+#include "b2r2_utils.h"
+
+#include <linux/kernel.h>
+
+/*
+ * Macros and constants
+ */
+
+#define INSTANCES_DEFAULT_SIZE 10
+#define INSTANCES_GROW_SIZE 5
+
+/*
+ * Internal types
+ */
+
+
+/*
+ * Global variables
+ */
+
+
+/*
+ * Forward declaration of private functions
+ */
+static int analyze_fmt_conv(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 **vmx, u32 *node_count,
+ bool fullrange);
+static int analyze_color_fill(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count);
+static int analyze_copy(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_scaling(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_rotate(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_transform(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_rot_scale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_scale_factors(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this);
+
+static void configure_src(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *src, const u32 *ivmx);
+static void configure_bg(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *bg, bool swap_fg_bg);
+static int configure_dst(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next);
+static void configure_blend(struct b2r2_control *cont, struct b2r2_node *node,
+ u32 flags, u32 global_alpha);
+static void configure_clip(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_blt_rect *clip_rect);
+
+static int configure_tile(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next);
+static void configure_direct_fill(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 color,
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next);
+static int configure_fill(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 color, enum b2r2_blt_fmt fmt,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next);
+static void configure_direct_copy(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, struct b2r2_node **next);
+static int configure_copy(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_rotate(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_scale(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, u16 h_rsf, u16 v_rsf,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_rot_scale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next);
+
+static int check_rect(struct b2r2_control *cont,
+ const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect,
+ const struct b2r2_blt_rect *clip);
+static void set_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *buf,
+ u32 addr, const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect, bool color_fill, u32 color);
+static int setup_tmp_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *this, u32 max_size,
+ enum b2r2_blt_fmt pref_fmt, u32 pref_width, u32 pref_height);
+
+static bool is_transform(const struct b2r2_blt_request *req);
+static s32 rescale(struct b2r2_control *cont, s32 dim, u16 sf);
+static s32 inv_rescale(s32 dim, u16 sf);
+
+static void set_target(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src(struct b2r2_src_config *src, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_1(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_2(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_3(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_ivmx(struct b2r2_node *node, const u32 *vmx_values);
+
+static void reset_nodes(struct b2r2_node *node);
+
+static bool bg_format_require_ivmx(enum b2r2_blt_fmt bg_fmt,
+ enum b2r2_blt_fmt dst_fmt);
+
+/*
+ * Public functions
+ */
+
+/**
+ * b2r2_node_split_analyze() - analyzes the request
+ */
+int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
+ u32 max_buf_size, u32 *node_count, struct b2r2_work_buf **bufs,
+ u32 *buf_count, struct b2r2_node_split_job *this)
+{
+ int ret;
+ bool color_fill;
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ memset(this, 0, sizeof(*this));
+
+ /* Copy parameters */
+ this->flags = req->user_req.flags;
+ this->transform = req->user_req.transform;
+ this->max_buf_size = max_buf_size;
+ this->global_alpha = req->user_req.global_alpha;
+ this->buf_count = 0;
+ this->node_count = 0;
+
+ if (this->flags & B2R2_BLT_FLAG_BLUR) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Unsupported formats on src */
+ switch (req->user_req.src_img.fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (b2r2_is_bgr_fmt(req->user_req.dst_img.fmt)) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Unsupported formats on dst */
+ switch (req->user_req.dst_img.fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (b2r2_is_bgr_fmt(req->user_req.src_img.fmt)) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Unsupported formats on bg */
+	if (this->flags & B2R2_BLT_FLAG_BG_BLEND) {
+		/*
+		 * There is no ivmx on source 1, so check that there is no
+		 * such requirement on the background to destination format
+		 * conversion. This check is sufficient since the node splitter
+		 * currently does not support destination ivmx. That fact also
+		 * removes the source format as a parameter when checking the
+		 * background format.
+		 */
+		if (bg_format_require_ivmx(req->user_req.bg_img.fmt,
+				req->user_req.dst_img.fmt)) {
+			ret = -ENOSYS;
+			goto unsupported;
+		}
+	}
+
+ if ((this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) &&
+ (b2r2_is_yuv_fmt(req->user_req.src_img.fmt) ||
+ req->user_req.src_img.fmt == B2R2_BLT_FMT_1_BIT_A1 ||
+ req->user_req.src_img.fmt == B2R2_BLT_FMT_8_BIT_A8)) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source color keying "
+ "with YUV or pure alpha formats.\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ if (this->flags & (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_MASK)) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source mask, "
+ "destination color keying.\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ req->user_req.clut == NULL) {
+ b2r2_log_warn(cont->dev, "%s: Invalid request: no table "
+ "specified for CLUT color correction.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check for color fill */
+ color_fill = (this->flags & (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0;
+
+ /* Configure the source and destination buffers */
+ set_buf(cont, &this->src, req->src_resolved.physical_address,
+ &req->user_req.src_img, &req->user_req.src_rect,
+ color_fill, req->user_req.src_color);
+
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND) {
+ set_buf(cont, &this->bg, req->bg_resolved.physical_address,
+ &req->user_req.bg_img, &req->user_req.bg_rect,
+ false, 0);
+ }
+
+ set_buf(cont, &this->dst, req->dst_resolved.physical_address,
+ &req->user_req.dst_img, &req->user_req.dst_rect, false,
+ 0);
+
+ b2r2_log_info(cont->dev, "%s:\n"
+ "\t\tsrc.rect=(%4d, %4d, %4d, %4d)\t"
+ "bg.rect=(%4d, %4d, %4d, %4d)\t"
+ "dst.rect=(%4d, %4d, %4d, %4d)\n", __func__, this->src.rect.x,
+ this->src.rect.y, this->src.rect.width, this->src.rect.height,
+ this->bg.rect.x, this->bg.rect.y, this->bg.rect.width,
+ this->bg.rect.height, this->dst.rect.x, this->dst.rect.y,
+ this->dst.rect.width, this->dst.rect.height);
+
+ if (this->flags & B2R2_BLT_FLAG_DITHER)
+ this->dst.dither = B2R2_TTY_RGB_ROUND_DITHER;
+
+ if (this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY)
+ this->flag_param = req->user_req.src_color;
+
+ /* Check for blending */
+ if ((this->flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) &&
+ (this->global_alpha != 255))
+ this->blend = true;
+ else if (this->flags & B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)
+ this->blend = (color_fill && b2r2_fmt_has_alpha(this->dst.fmt)) ||
+ b2r2_fmt_has_alpha(this->src.fmt);
+ else if (this->flags & B2R2_BLT_FLAG_BG_BLEND)
+ this->blend = true;
+
+ /* Check for full range YUV conversion */
+ if (this->flags & B2R2_BLT_FLAG_FULL_RANGE_YUV)
+ this->fullrange = true;
+
+ if (this->blend && this->src.type == B2R2_FMT_TYPE_PLANAR) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: blend with planar"
+ " source\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Check for clipping */
+ this->clip = (this->flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0;
+ if (this->clip) {
+ s32 l = req->user_req.dst_clip_rect.x;
+ s32 r = l + req->user_req.dst_clip_rect.width;
+ s32 t = req->user_req.dst_clip_rect.y;
+ s32 b = t + req->user_req.dst_clip_rect.height;
+
+ /* Intersect the clip and buffer rects */
+ if (l < 0)
+ l = 0;
+ if (r > req->user_req.dst_img.width)
+ r = req->user_req.dst_img.width;
+ if (t < 0)
+ t = 0;
+ if (b > req->user_req.dst_img.height)
+ b = req->user_req.dst_img.height;
+
+ this->clip_rect.x = l;
+ this->clip_rect.y = t;
+ this->clip_rect.width = r - l;
+ this->clip_rect.height = b - t;
+ } else {
+ /* Set the clip rectangle to the buffer bounds */
+ this->clip_rect.x = 0;
+ this->clip_rect.y = 0;
+ this->clip_rect.width = req->user_req.dst_img.width;
+ this->clip_rect.height = req->user_req.dst_img.height;
+ }
+
+ /* Validate the destination */
+ ret = check_rect(cont, &req->user_req.dst_img, &req->user_req.dst_rect,
+ &this->clip_rect);
+ if (ret < 0)
+ goto error;
+
+ /* Validate the source (if not color fill) */
+ if (!color_fill) {
+ ret = check_rect(cont, &req->user_req.src_img,
+ &req->user_req.src_rect, NULL);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Validate the background source */
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND) {
+ ret = check_rect(cont, &req->user_req.bg_img,
+ &req->user_req.bg_rect, NULL);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Do the analysis depending on the type of operation */
+ if (color_fill) {
+ ret = analyze_color_fill(this, req, &this->node_count);
+ } else {
+ bool upsample;
+ bool downsample;
+
+ /*
+		 * Non-raster, non-YUV444 YUV formats need to be up (or down)
+		 * sampled using the resizer.
+ *
+ * NOTE: The resizer needs to be enabled for YUV444 as well,
+ * even though there is no upsampling. This is most
+ * likely a bug in the hardware.
+ */
+ upsample = this->src.type != B2R2_FMT_TYPE_RASTER &&
+ b2r2_is_yuv_fmt(this->src.fmt);
+ downsample = this->dst.type != B2R2_FMT_TYPE_RASTER &&
+ b2r2_is_yuv_fmt(this->dst.fmt);
+
+ if (is_transform(req) || upsample || downsample)
+ ret = analyze_transform(this, req, &this->node_count,
+ &this->buf_count);
+ else
+ ret = analyze_copy(this, req, &this->node_count,
+ &this->buf_count);
+ }
+
+ if (ret == -ENOSYS) {
+ goto unsupported;
+ } else if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Analysis failed!\n", __func__);
+ goto error;
+ }
+
+ /* Setup the origin and movement of the destination window */
+ if (this->dst.hso == B2R2_TY_HSO_RIGHT_TO_LEFT) {
+ this->dst.dx = -this->dst.win.width;
+ this->dst.win.x = this->dst.rect.x + this->dst.rect.width - 1;
+ } else {
+ this->dst.dx = this->dst.win.width;
+ this->dst.win.x = this->dst.rect.x;
+ }
+ if (this->dst.vso == B2R2_TY_VSO_BOTTOM_TO_TOP) {
+ this->dst.dy = -this->dst.win.height;
+ this->dst.win.y = this->dst.rect.y + this->dst.rect.height - 1;
+ } else {
+ this->dst.dy = this->dst.win.height;
+ this->dst.win.y = this->dst.rect.y;
+ }
+
+ *buf_count = this->buf_count;
+ *node_count = this->node_count;
+
+ if (this->buf_count > 0)
+ *bufs = &this->work_bufs[0];
+
+ b2r2_log_info(cont->dev, "%s: dst.win=(%d, %d, %d, %d), "
+ "dst.dx=%d, dst.dy=%d\n", __func__, this->dst.win.x,
+ this->dst.win.y, this->dst.win.width, this->dst.win.height,
+ this->dst.dx, this->dst.dy);
+ if (this->buf_count > 0)
+ b2r2_log_info(cont->dev, "%s: buf_count=%d, buf_size=%d, "
+ "node_count=%d\n", __func__, *buf_count,
+ bufs[0]->size, *node_count);
+ else
+ b2r2_log_info(cont->dev, "%s: buf_count=%d, node_count=%d\n",
+ __func__, *buf_count, *node_count);
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+unsupported:
+ return ret;
+}
+
+/**
+ * b2r2_node_split_configure() - configures the node list
+ */
+int b2r2_node_split_configure(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first)
+{
+ int ret;
+
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node *node = first;
+
+ u32 x_pixels = 0;
+ u32 y_pixels = 0;
+
+ reset_nodes(node);
+
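+	/*
+	 * Walk the destination rectangle window by window, row by row.
+	 * Each position of the window is configured as one tile.
+	 */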
+ while (y_pixels < dst->rect.height) {
+ s32 dst_x = dst->win.x;
+ s32 dst_w = dst->win.width;
+
+ /* Clamp window height */
+ if (dst->win.height > dst->rect.height - y_pixels)
+ dst->win.height = dst->rect.height - y_pixels;
+
+ while (x_pixels < dst->rect.width) {
+
+ /* Clamp window width */
+ if (dst_w > dst->rect.width - x_pixels)
+ dst->win.width = dst->rect.width - x_pixels;
+
+ ret = configure_tile(cont, this, node, &node);
+ if (ret < 0)
+ goto error;
+
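+			/*
+			 * dx is negative when scanning right to left, so
+			 * step by its absolute value.
+			 */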
+ dst->win.x += dst->dx;
+ x_pixels += max(dst->dx, -dst->dx);
+ b2r2_log_info(cont->dev, "%s: x_pixels=%d\n",
+ __func__, x_pixels);
+ }
+
+ dst->win.y += dst->dy;
+ y_pixels += max(dst->dy, -dst->dy);
+
+ dst->win.x = dst_x;
+ dst->win.width = dst_w;
+ x_pixels = 0;
+
+ b2r2_log_info(cont->dev, "%s: y_pixels=%d\n",
+ __func__, y_pixels);
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * b2r2_node_split_assign_buffers() - assigns temporary buffers to the node list
+ */
+int b2r2_node_split_assign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first,
+ struct b2r2_work_buf *bufs, u32 buf_count)
+{
+ struct b2r2_node *node = first;
+
+ while (node != NULL) {
+ /* The indices are offset by one */
+ if (node->dst_tmp_index) {
+ BUG_ON(node->dst_tmp_index > buf_count);
+
+ b2r2_log_info(cont->dev, "%s: assigning buf %d as "
+ "dst\n", __func__, node->dst_tmp_index);
+
+ node->node.GROUP1.B2R2_TBA =
+ bufs[node->dst_tmp_index - 1].phys_addr;
+ }
+ if (node->src_tmp_index) {
+ u32 addr = bufs[node->src_tmp_index - 1].phys_addr;
+
+ b2r2_log_info(cont->dev, "%s: assigning buf %d as src "
+ "%d ", __func__, node->src_tmp_index,
+ node->src_index);
+
+ BUG_ON(node->src_tmp_index > buf_count);
+
+ switch (node->src_index) {
+ case 1:
+ b2r2_log_info(cont->dev, "1\n");
+ node->node.GROUP3.B2R2_SBA = addr;
+ break;
+ case 2:
+ b2r2_log_info(cont->dev, "2\n");
+ node->node.GROUP4.B2R2_SBA = addr;
+ break;
+ case 3:
+ b2r2_log_info(cont->dev, "3\n");
+ node->node.GROUP5.B2R2_SBA = addr;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+ }
+
+ b2r2_log_info(cont->dev, "%s: tba=%p\tsba=%p\n", __func__,
+ (void *)node->node.GROUP1.B2R2_TBA,
+ (void *)node->node.GROUP4.B2R2_SBA);
+
+ node = node->next;
+ }
+
+ return 0;
+}
+
+/**
+ * b2r2_node_split_unassign_buffers() - releases temporary buffers
+ */
+void b2r2_node_split_unassign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first)
+{
+}
+
+/**
+ * b2r2_node_split_cancel() - cancels and releases a job instance
+ */
+void b2r2_node_split_cancel(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this)
+{
+ memset(this, 0, sizeof(*this));
+
+ return;
+}
+
+static int check_rect(struct b2r2_control *cont,
+ const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect,
+ const struct b2r2_blt_rect *clip)
+{
+ int ret;
+
+ s32 l, r, b, t;
+
+	/* Check rectangle dimensions */
+ if ((rect->width <= 0) || (rect->height <= 0)) {
+ b2r2_log_warn(cont->dev, "%s: Illegal rect (%d, %d, %d, %d)\n",
+ __func__, rect->x, rect->y, rect->width,
+ rect->height);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* If we are using clip we should only look at the intersection of the
+ rects */
+ if (clip) {
+ l = max(rect->x, clip->x);
+ t = max(rect->y, clip->y);
+ r = min(rect->x + rect->width, clip->x + clip->width);
+ b = min(rect->y + rect->height, clip->y + clip->height);
+ } else {
+ l = rect->x;
+ t = rect->y;
+ r = rect->x + rect->width;
+ b = rect->y + rect->height;
+ }
+
+	/* Check that the rect isn't outside the buffer */
+ if ((l < 0) || (t < 0) || (l >= img->width) || (t >= img->height)) {
+ b2r2_log_warn(cont->dev, "%s: rect origin outside buffer\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if ((r > img->width) || (b > img->height)) {
+ b2r2_log_warn(cont->dev, "%s: rect ends outside buffer\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+	/* Check that the intersected rectangle isn't empty */
+ if ((l == r) || (t == b)) {
+ b2r2_log_warn(cont->dev,
+ "%s: rect is empty (width or height zero)\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * bg_format_require_ivmx()
+ *
+ * Check if any color space conversion is needed for the background
+ * to the destination format.
+ */
+static bool bg_format_require_ivmx(enum b2r2_blt_fmt bg_fmt,
+ enum b2r2_blt_fmt dst_fmt)
+{
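+	/*
+	 * A conversion is needed whenever the background and destination
+	 * differ in color space (RGB/BGR/YUV) or in YUV component layout;
+	 * matching formats need no ivmx.
+	 */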
+ if (b2r2_is_rgb_fmt(bg_fmt)) {
+ if (b2r2_is_yvu_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (b2r2_is_yuv_fmt(dst_fmt))
+ return true;
+ else if (b2r2_is_bgr_fmt(dst_fmt))
+ return true;
+ } else if (b2r2_is_yvu_fmt(bg_fmt)) {
+ if (b2r2_is_rgb_fmt(dst_fmt))
+ return true;
+ else if (b2r2_is_bgr_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (b2r2_is_yuv_fmt(dst_fmt) &&
+ !b2r2_is_yvu_fmt(dst_fmt))
+ return true;
+ } else if (bg_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ bg_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ bg_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ bg_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) {
+ if (b2r2_is_rgb_fmt(dst_fmt)) {
+ return true;
+ } else if (b2r2_is_yvu_fmt(dst_fmt)) {
+ return true;
+ } else if (b2r2_is_yuv_fmt(dst_fmt)) {
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ break;
+ default:
+ return true;
+ }
+ }
+ } else if (b2r2_is_yuv_fmt(bg_fmt)) {
+ if (b2r2_is_rgb_fmt(dst_fmt))
+ return true;
+ else if (b2r2_is_bgr_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (b2r2_is_yvu_fmt(dst_fmt))
+ return true;
+ } else if (b2r2_is_bgr_fmt(bg_fmt)) {
+ if (b2r2_is_rgb_fmt(dst_fmt))
+ return true;
+ else if (b2r2_is_yvu_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (b2r2_is_yuv_fmt(dst_fmt))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * analyze_fmt_conv() - analyze the format conversions needed for a job
+ */
+static int analyze_fmt_conv(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 **vmx, u32 *node_count, bool fullrange)
+{
+ enum b2r2_color_conversion cc =
+ b2r2_get_color_conversion(src->fmt, dst->fmt, fullrange);
+
+ b2r2_get_vmx(cc, vmx);
+
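+	/* One node is required for each destination plane */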
+ if (dst->type == B2R2_FMT_TYPE_RASTER) {
+ *node_count = 1;
+ } else if (dst->type == B2R2_FMT_TYPE_SEMI_PLANAR) {
+ *node_count = 2;
+ } else if (dst->type == B2R2_FMT_TYPE_PLANAR) {
+ *node_count = 3;
+ } else {
+ /* That's strange... */
+ BUG_ON(1);
+ }
+
+ return 0;
+}
+
+/**
+ * analyze_color_fill() - analyze a color fill operation
+ */
+static int analyze_color_fill(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count)
+{
+ int ret;
+ struct b2r2_control *cont = req->instance->control;
+
+ /* Destination must be raster for raw fill to work */
+ if (this->dst.type != B2R2_FMT_TYPE_RASTER) {
+ b2r2_log_warn(cont->dev,
+ "%s: fill requires raster destination\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* We will try to fill the entire rectangle in one go */
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ /* Check if this is a direct fill */
+ if ((!this->blend) && ((this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_ARGB8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_AYUV8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_VUYA8888))) {
+ this->type = B2R2_DIRECT_FILL;
+
+ /* The color format will be the same as the dst fmt */
+ this->src.fmt = this->dst.fmt;
+
+		/* The entire destination rectangle will be filled in one go */
+ memcpy(&this->dst.win, &this->dst.rect,
+ sizeof(this->dst.win));
+ *node_count = 1;
+ } else {
+ this->type = B2R2_FILL;
+
+ /* Determine the fill color format */
+ if (this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) {
+ /* The color format will be the same as the dst fmt */
+ this->src.fmt = this->dst.fmt;
+ } else {
+ /* If the dst fmt is YUV the fill fmt will be as well */
+ if (b2r2_is_yuv_fmt(this->dst.fmt)) {
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ } else if (b2r2_is_rgb_fmt(this->dst.fmt)) {
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else if (b2r2_is_bgr_fmt(this->dst.fmt)) {
+ /* Color will still be ARGB, we will translate
+ using IVMX (configured later) */
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else {
+ /* Wait, what? */
+ b2r2_log_warn(cont->dev, "%s: "
+ "Illegal destination format for fill",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+
+ /* Also, B2R2 seems to ignore the pixel alpha value */
+ if (((this->flags & B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)
+ != 0) &&
+ ((this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW)
+ == 0) && b2r2_fmt_has_alpha(this->src.fmt)) {
+ u8 pixel_alpha = b2r2_get_alpha(this->src.fmt,
+ this->src.color);
+ u32 new_global = pixel_alpha * this->global_alpha / 255;
+
+ this->global_alpha = (u8)new_global;
+
+ /* Set the pixel alpha to full opaque so we don't get
+ any nasty surprises */
+ this->src.color = b2r2_set_alpha(this->src.fmt, 0xFF,
+ this->src.color);
+ }
+
+ ret = analyze_fmt_conv(
+ cont, &this->src, &this->dst, &this->ivmx,
+ node_count, this->fullrange);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * analyze_transform() - analyze a transform operation (rescale, rotate, etc.)
+ */
+static int analyze_transform(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ bool is_scaling;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /*
+ * The transform enum is defined so that all rotation transforms are
+ * masked with the rotation flag
+ */
+ this->rotation = (this->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0;
+
+ /* B2R2 cannot do rotations if the destination is not raster, or 422R */
+ if (this->rotation && (this->dst.type != B2R2_FMT_TYPE_RASTER ||
+ this->dst.fmt == B2R2_BLT_FMT_Y_CB_Y_CR ||
+ this->dst.fmt == B2R2_BLT_FMT_CB_Y_CR_Y)) {
+ b2r2_log_warn(cont->dev,
+ "%s: Unsupported operation "
+ "(rot && (!dst_raster || dst==422R))",
+ __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Flip the image by changing the scan order of the destination */
+ if (this->transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ this->dst.hso = B2R2_TY_HSO_RIGHT_TO_LEFT;
+ if (this->transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ /* Check for scaling */
+ if (this->rotation) {
+ is_scaling = (this->src.rect.width != this->dst.rect.height) ||
+ (this->src.rect.height != this->dst.rect.width);
+ } else {
+ is_scaling = (this->src.rect.width != this->dst.rect.width) ||
+ (this->src.rect.height != this->dst.rect.height);
+ }
+
+ /* Plane separated formats must be treated as scaling */
+ is_scaling = is_scaling ||
+ (this->src.type == B2R2_FMT_TYPE_SEMI_PLANAR) ||
+ (this->src.type == B2R2_FMT_TYPE_PLANAR) ||
+ (this->dst.type == B2R2_FMT_TYPE_SEMI_PLANAR) ||
+ (this->dst.type == B2R2_FMT_TYPE_PLANAR);
+
+ if (is_scaling && this->rotation && this->blend) {
+ /* TODO: This is unsupported. Fix it! */
+ b2r2_log_info(cont->dev, "%s: Unsupported operation "
+ "(rot+rescale+blend)\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Check which type of transform */
+ if (is_scaling && this->rotation) {
+ ret = analyze_rot_scale(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else if (is_scaling) {
+ ret = analyze_scaling(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else if (this->rotation) {
+ ret = analyze_rotate(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else {
+ /* No additional nodes needed for a flip */
+ ret = analyze_copy(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ this->type = B2R2_FLIP;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+unsupported:
+ return ret;
+}
+
+/**
+ * analyze_copy() - analyze a copy operation
+ */
+static int analyze_copy(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ struct b2r2_control *cont = req->instance->control;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ if (!this->blend &&
+ !(this->flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ (this->src.fmt == this->dst.fmt) &&
+ (this->src.type == B2R2_FMT_TYPE_RASTER) &&
+ (this->dst.rect.x >= this->clip_rect.x) &&
+ (this->dst.rect.y >= this->clip_rect.y) &&
+ (this->dst.rect.x + this->dst.rect.width <=
+ this->clip_rect.x + this->clip_rect.width) &&
+ (this->dst.rect.y + this->dst.rect.height <=
+ this->clip_rect.y + this->clip_rect.height)) {
+ this->type = B2R2_DIRECT_COPY;
+ *node_count = 1;
+ } else {
+ u32 copy_count;
+
+ this->type = B2R2_COPY;
+
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst,
+ &this->ivmx, &copy_count, this->fullrange);
+ if (ret < 0)
+ goto error;
+
+ *node_count = copy_count;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
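+/**
+ * calc_rot_count() - calculates the number of rotation tiles for an area
+ *
+ * The rotator works on stripes of at most B2R2_ROTATE_MAX_WIDTH pixels,
+ * so the area has to be split accordingly.
+ */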
+static int calc_rot_count(u32 width, u32 height)
+{
+ int count;
+
+ count = width / B2R2_ROTATE_MAX_WIDTH;
+ if (width % B2R2_ROTATE_MAX_WIDTH)
+ count++;
+ if (height > B2R2_ROTATE_MAX_WIDTH &&
+ height % B2R2_ROTATE_MAX_WIDTH)
+ count *= 2;
+
+ return count;
+}
+
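+/**
+ * analyze_rot_scale_downscale() - analyzes a combined rotate and downscale
+ *
+ * The operation is split in two stages: a rescale into a temporary buffer
+ * followed by a rotation to the destination, so nodes and buffers are
+ * counted for both stages.
+ */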
+static int analyze_rot_scale_downscale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ struct b2r2_control *cont = req->instance->control;
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ u32 num_rows;
+ u32 num_cols;
+ u32 rot_count;
+ u32 rescale_count;
+ u32 nodes_per_rot;
+ u32 nodes_per_rescale;
+ u32 right_width;
+ u32 bottom_height;
+ const u32 *dummy_vmx;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Calculate the desired tmp buffer size */
+ tmp->win.width = rescale(cont, B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
+ tmp->win.width >>= 10;
+ tmp->win.width = min(tmp->win.width, dst->rect.height);
+ tmp->win.height = dst->rect.width;
+
+ setup_tmp_buf(cont, tmp, this->max_buf_size, dst->fmt, tmp->win.width,
+ tmp->win.height);
+ tmp->tmp_buf_index = 1;
+ this->work_bufs[0].size = tmp->pitch * tmp->height;
+
+ tmp->win.width = tmp->rect.width;
+ tmp->win.height = tmp->rect.height;
+
+ tmp->dither = dst->dither;
+ dst->dither = 0;
+
+ /* Update the dst window with the actual tmp buffer dimensions */
+ dst->win.width = tmp->win.height;
+ dst->win.height = tmp->win.width;
+
+ /* The rotated stripes are written to the destination bottom-up */
+ if (this->dst.vso == B2R2_TY_VSO_TOP_TO_BOTTOM)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ else
+ this->dst.vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /*
+ * Calculate how many nodes are required to copy to and from the tmp
+ * buffer
+ */
+ ret = analyze_fmt_conv(cont, src, tmp, &this->ivmx, &nodes_per_rescale,
+ this->fullrange);
+ if (ret < 0)
+ goto error;
+
+ /* We will not do any format conversion in the rotation stage */
+ ret = analyze_fmt_conv(cont, tmp, dst, &dummy_vmx, &nodes_per_rot,
+ this->fullrange);
+ if (ret < 0)
+ goto error;
+
+ /* Calculate node count for the inner tiles */
+ num_cols = dst->rect.width / dst->win.width;
+ num_rows = dst->rect.height / dst->win.height;
+
+ rescale_count = num_cols * num_rows;
+ rot_count = calc_rot_count(dst->win.height, dst->win.width) *
+ num_cols * num_rows;
+
+ right_width = dst->rect.width % dst->win.width;
+ bottom_height = dst->rect.height % dst->win.height;
+
+ /* Calculate node count for the rightmost tiles */
+ if (right_width) {
+ u32 count = calc_rot_count(dst->win.height, right_width);
+
+ rot_count += count * num_rows;
+ rescale_count += num_rows;
+ b2r2_log_info(cont->dev, "%s: rightmost: %d nodes\n", __func__,
+ count*num_rows);
+ }
+
+ /* Calculate node count for the bottom tiles */
+ if (bottom_height) {
+ u32 count = calc_rot_count(bottom_height, dst->win.width);
+
+ rot_count += count * num_cols;
+ rescale_count += num_cols;
+ b2r2_log_info(cont->dev, "%s: bottom: %d nodes\n", __func__,
+ count * num_cols);
+ }
+
+ /* And finally for the bottom right corner */
+ if (right_width && bottom_height) {
+ u32 count = calc_rot_count(bottom_height, right_width);
+
+ rot_count += count;
+ rescale_count++;
+ b2r2_log_info(cont->dev, "%s: bottom right: %d nodes\n",
+ __func__, count);
+ }
+
+ *node_count = rot_count * nodes_per_rot;
+ *node_count += rescale_count * nodes_per_rescale;
+ *buf_count = 1;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
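+/**
+ * analyze_rot_scale_upscale() - analyzes a combined rotate and upscale
+ */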
+static int analyze_rot_scale_upscale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+	/* TODO: When upscaling we should optimally do the rotation first... */
+ return analyze_rot_scale_downscale(this, req, node_count, buf_count);
+}
+
+/**
+ * analyze_rot_scale() - analyzes a combined rotation and scaling op
+ */
+static int analyze_rot_scale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ bool upscale;
+ struct b2r2_control *cont = req->instance->control;
+
+ ret = analyze_scale_factors(cont, this);
+ if (ret < 0)
+ goto error;
+
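+	/*
+	 * The scale factors are 6.10 fixed point, so the product of the
+	 * two is 1 << 20 when the total scaling is 1:1.
+	 */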
+ upscale = (u32)this->h_rsf * (u32)this->v_rsf < (1 << 20);
+
+ if (upscale)
+ ret = analyze_rot_scale_upscale(this, req, node_count,
+ buf_count);
+ else
+ ret = analyze_rot_scale_downscale(this, req, node_count,
+ buf_count);
+
+ if (ret < 0)
+ goto error;
+
+ this->type = B2R2_SCALE_AND_ROTATE;
+
+ return 0;
+
+error:
+ return ret;
+}
+
+/**
+ * analyze_scaling() - analyze a rescale operation
+ */
+static int analyze_scaling(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ u32 copy_count;
+ u32 nbr_cols;
+ s32 dst_w;
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ ret = analyze_scale_factors(cont, this);
+ if (ret < 0)
+ goto error;
+
+ /* Find out how many nodes a simple copy would require */
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst, &this->ivmx,
+ &copy_count, this->fullrange);
+ if (ret < 0)
+ goto error;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ /*
+ * We need to subtract from the actual maximum rescale width since the
+ * start of the stripe will be floored and the end ceiled. This could in
+ * some cases cause the stripe to be one pixel more than the maximum
+ * width.
+ *
+ * Example:
+ * x = 127.8, w = 127.8
+ *
+ * The stripe will touch pixels 127.8 through 255.6, i.e. 129 pixels.
+ */
+ dst_w = rescale(cont, B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
+ if (dst_w < (1 << 10))
+ dst_w = 1;
+ else
+ dst_w >>= 10;
+
+ b2r2_log_info(cont->dev, "%s: dst_w=%d dst.rect.width=%d\n",
+ __func__, dst_w, this->dst.rect.width);
+
+ this->dst.win.width = min(dst_w, this->dst.rect.width);
+
+ b2r2_log_info(cont->dev, "%s: dst.win.width=%d\n",
+ __func__, this->dst.win.width);
+
+ nbr_cols = this->dst.rect.width / this->dst.win.width;
+ if (this->dst.rect.width % this->dst.win.width)
+ nbr_cols++;
+
+ *node_count = copy_count * nbr_cols;
+
+ this->type = B2R2_SCALE;
+
+ b2r2_log_info(cont->dev, "%s exit\n", __func__);
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * analyze_rotate() - analyze a rotate operation
+ */
+static int analyze_rotate(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ u32 nodes_per_tile;
+ struct b2r2_control *cont = req->instance->control;
+
+ /* Find out how many nodes a simple copy would require */
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst, &this->ivmx,
+ &nodes_per_tile, this->fullrange);
+ if (ret < 0)
+ goto error;
+
+ this->type = B2R2_ROTATE;
+
+ /* The rotated stripes are written to the destination bottom-up */
+ if (this->dst.vso == B2R2_TY_VSO_TOP_TO_BOTTOM)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ else
+ this->dst.vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ this->dst.win.height = min(this->dst.win.height, B2R2_ROTATE_MAX_WIDTH);
+
+ /*
+ * B2R2 cannot do rotations on stripes that are not a multiple of 16
+ * pixels high (if larger than 16 pixels).
+ */
+ if (this->dst.win.width > 16)
+ this->dst.win.width -= (this->dst.win.width % 16);
+
+ /* Blending cannot be combined with rotation */
+ if (this->blend) {
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+ enum b2r2_blt_fmt tmp_fmt;
+
+ if (b2r2_is_yuv_fmt(this->dst.fmt))
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ else if (b2r2_is_bgr_fmt(this->dst.fmt))
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_ABGR8888;
+ else
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+
+ setup_tmp_buf(cont, tmp, this->max_buf_size, tmp_fmt,
+ this->dst.win.width, this->dst.win.height);
+
+ tmp->tmp_buf_index = 1;
+
+ tmp->vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ this->dst.win.width = tmp->rect.width;
+ this->dst.win.height = tmp->rect.height;
+
+ memcpy(&tmp->win, &tmp->rect, sizeof(tmp->win));
+
+ *buf_count = 1;
+ this->work_bufs[0].size = tmp->pitch * tmp->height;
+
+ /*
+ * One more node per tile is required to rotate to the temp
+ * buffer.
+ */
+ nodes_per_tile++;
+ }
+
+ /* Finally, calculate the node count */
+ *node_count = nodes_per_tile *
+ calc_rot_count(this->src.rect.width, this->src.rect.height);
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * analyze_scale_factors() - determines the scale factors for the op
+ */
+static int analyze_scale_factors(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ u16 hsf;
+ u16 vsf;
+
+ if (this->rotation) {
+ ret = calculate_scale_factor(cont->dev, this->src.rect.width,
+ this->dst.rect.height, &hsf);
+ if (ret < 0)
+ goto error;
+
+ ret = calculate_scale_factor(cont->dev, this->src.rect.height,
+ this->dst.rect.width, &vsf);
+ if (ret < 0)
+ goto error;
+ } else {
+ ret = calculate_scale_factor(cont->dev, this->src.rect.width,
+ this->dst.rect.width, &hsf);
+ if (ret < 0)
+ goto error;
+
+ ret = calculate_scale_factor(cont->dev, this->src.rect.height,
+ this->dst.rect.height, &vsf);
+ if (ret < 0)
+ goto error;
+ }
+
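+	/* A factor of exactly 1 << 10 (i.e. 1.0) means no rescale is needed */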
+ this->h_rescale = hsf != (1 << 10);
+ this->v_rescale = vsf != (1 << 10);
+
+ this->h_rsf = hsf;
+ this->v_rsf = vsf;
+
+ b2r2_log_info(cont->dev, "%s: h_rsf=%.4x\n", __func__, this->h_rsf);
+ b2r2_log_info(cont->dev, "%s: v_rsf=%.4x\n", __func__, this->v_rsf);
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_tile() - configures one tile of a blit operation
+ */
+static int configure_tile(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
+{
+ int ret = 0;
+
+ struct b2r2_node *last;
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node_split_buf *bg = &this->bg;
+
+ struct b2r2_blt_rect dst_norm;
+ struct b2r2_blt_rect src_norm;
+ struct b2r2_blt_rect bg_norm;
+
+ /* Normalize the dest coords to the dest rect coordinate space */
+ dst_norm.x = dst->win.x - dst->rect.x;
+ dst_norm.y = dst->win.y - dst->rect.y;
+ dst_norm.width = dst->win.width;
+ dst_norm.height = dst->win.height;
+
+ if (dst->vso == B2R2_TY_VSO_BOTTOM_TO_TOP) {
+ /* The y coord should be counted from the bottom */
+ dst_norm.y = dst->rect.height - (dst_norm.y + 1);
+ }
+ if (dst->hso == B2R2_TY_HSO_RIGHT_TO_LEFT) {
+ /* The x coord should be counted from the right */
+ dst_norm.x = dst->rect.width - (dst_norm.x + 1);
+ }
+
+ /* If the destination is rotated we should swap x, y */
+ if (this->rotation) {
+ src_norm.x = dst_norm.y;
+ src_norm.y = dst_norm.x;
+ src_norm.width = dst_norm.height;
+ src_norm.height = dst_norm.width;
+ } else {
+ src_norm.x = dst_norm.x;
+ src_norm.y = dst_norm.y;
+ src_norm.width = dst_norm.width;
+ src_norm.height = dst_norm.height;
+ }
+
+ /* Convert to src coordinate space */
+ src->win.x = src_norm.x + src->rect.x;
+ src->win.y = src_norm.y + src->rect.y;
+ src->win.width = src_norm.width;
+ src->win.height = src_norm.height;
+
+ /* Set bg norm */
+ bg_norm.x = dst->win.x - dst->rect.x;
+ bg_norm.y = dst->win.y - dst->rect.y;
+ bg_norm.width = dst->win.width;
+ bg_norm.height = dst->win.height;
+
+ /* Convert to bg coordinate space */
+ bg->win.x = bg_norm.x + bg->rect.x;
+ bg->win.y = bg_norm.y + bg->rect.y;
+ bg->win.width = bg_norm.width;
+ bg->win.height = bg_norm.height;
+ bg->vso = dst->vso;
+ bg->hso = dst->hso;
+
+ /* Do the configuration depending on operation type */
+ switch (this->type) {
+ case B2R2_DIRECT_FILL:
+ configure_direct_fill(cont, node, this->src.color, dst, &last);
+ break;
+
+ case B2R2_DIRECT_COPY:
+ configure_direct_copy(cont, node, src, dst, &last);
+ break;
+
+ case B2R2_FILL:
+ ret = configure_fill(cont, node, src->color, src->fmt,
+ dst, this->ivmx, &last);
+ break;
+
+ case B2R2_FLIP: /* FLIP is just a copy with different VSO/HSO */
+ case B2R2_COPY:
+ ret = configure_copy(
+ cont, node, src, dst, this->ivmx, &last, this);
+ break;
+
+ case B2R2_ROTATE:
+ {
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ if (this->blend) {
+ b2r2_log_info(cont->dev, "%s: rotation + "
+ "blend\n", __func__);
+
+ tmp->win.x = 0;
+ tmp->win.y = tmp->win.height - 1;
+ tmp->win.width = dst->win.width;
+ tmp->win.height = dst->win.height;
+
+ /* Rotate to the temp buf */
+ ret = configure_rotate(cont, node, src, tmp,
+ this->ivmx, &node, NULL);
+ if (ret < 0)
+ goto error;
+
+ /* Then do a copy to the destination */
+ ret = configure_copy(cont, node, tmp, dst, NULL,
+ &last, this);
+ } else {
+ /* Just do a rotation */
+ ret = configure_rotate(cont, node, src, dst,
+ this->ivmx, &last, this);
+ }
+ }
+ break;
+
+ case B2R2_SCALE:
+ ret = configure_scale(cont, node, src, dst, this->h_rsf,
+ this->v_rsf, this->ivmx, &last, this);
+ break;
+
+ case B2R2_SCALE_AND_ROTATE:
+ ret = configure_rot_scale(cont, this, node, &last);
+ break;
+
+ default:
+ b2r2_log_warn(cont->dev, "%s: Unsupported request\n", __func__);
+ ret = -ENOSYS;
+ goto error;
+ }
+
+ if (ret < 0)
+ goto error;
+
+ /* Scale and rotate will configure its own blending and clipping */
+ if (this->type != B2R2_SCALE_AND_ROTATE) {
+
+ /* Configure blending and clipping */
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (this->blend) {
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND)
+ configure_bg(cont, node, bg,
+ this->swap_fg_bg);
+ else
+ configure_bg(cont, node, dst,
+ this->swap_fg_bg);
+ configure_blend(cont, node, this->flags,
+ this->global_alpha);
+ }
+ if (this->clip)
+ configure_clip(cont, node, &this->clip_rect);
+
+ node = node->next;
+
+ } while (node != last);
+ }
+
+ /* Consume the nodes */
+ *next = last;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_sub_rot() - configure a sub-rotation
+ *
+ * This function configures a set of nodes for rotation using the destination
+ * window instead of the rectangle for calculating tiles.
+ */
+static int configure_sub_rot(struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *job)
+{
+ int ret;
+
+ struct b2r2_blt_rect src_win;
+ struct b2r2_blt_rect dst_win;
+
+ u32 y_pixels = 0;
+ u32 x_pixels = 0;
+
+ memcpy(&src_win, &src->win, sizeof(src_win));
+ memcpy(&dst_win, &dst->win, sizeof(dst_win));
+
+ b2r2_log_info(cont->dev, "%s: src_win=(%d, %d, %d, %d) "
+ "dst_win=(%d, %d, %d, %d)\n", __func__,
+ src_win.x, src_win.y, src_win.width, src_win.height,
+ dst_win.x, dst_win.y, dst_win.width, dst_win.height);
+
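+	/* Limit the destination window to the maximum rotate stripe size */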
+ dst->win.height = B2R2_ROTATE_MAX_WIDTH;
+ if (dst->win.width % B2R2_ROTATE_MAX_WIDTH)
+ dst->win.width -= dst->win.width % B2R2_ROTATE_MAX_WIDTH;
+
+ while (x_pixels < dst_win.width) {
+ u32 src_x = src->win.x;
+ u32 src_w = src->win.width;
+ u32 dst_y = dst->win.y;
+ u32 dst_h = dst->win.height;
+
+ dst->win.width = min(dst->win.width, dst_win.width -
+ (int)x_pixels);
+ src->win.height = dst->win.width;
+ __func__, x_pixels);
+
+ while (y_pixels < dst_win.height) {
+ dst->win.height = min(dst->win.height,
+ dst_win.height - (int)y_pixels);
+ src->win.width = dst->win.height;
+
+ b2r2_log_info(cont->dev, "%s: y_pixels=%d\n",
+ __func__, y_pixels);
+
+ ret = configure_rotate(cont, node, src, dst,
+ ivmx, &node, job);
+ if (ret < 0)
+ goto error;
+
+ src->win.x += (src->hso == B2R2_TY_HSO_LEFT_TO_RIGHT) ?
+ src->win.width : -src->win.width;
+ dst->win.y += (dst->vso == B2R2_TY_VSO_TOP_TO_BOTTOM) ?
+ dst->win.height : -dst->win.height;
+
+ y_pixels += dst->win.height;
+ }
+
+ src->win.x = src_x;
+ src->win.y += (src->vso == B2R2_TY_VSO_TOP_TO_BOTTOM) ?
+ src->win.height : -src->win.height;
+ src->win.width = src_w;
+
+ dst->win.x += (dst->hso == B2R2_TY_HSO_LEFT_TO_RIGHT) ?
+ dst->win.width : -dst->win.width;
+ dst->win.y = dst_y;
+ dst->win.height = dst_h;
+
+ x_pixels += dst->win.width;
+ y_pixels = 0;
+ }
+
+ memcpy(&src->win, &src_win, sizeof(src->win));
+ memcpy(&dst->win, &dst_win, sizeof(dst->win));
+
+ *next = node;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rot_downscale() - configures a combined rotate and downscale
+ *
+ * When doing a downscale it is better to do the rotation last.
+ */
+static int configure_rot_downscale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this,
+ struct b2r2_node *node, struct b2r2_node **next)
+{
+ int ret;
+
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ tmp->win.x = 0;
+ tmp->win.y = 0;
+ tmp->win.width = dst->win.height;
+ tmp->win.height = dst->win.width;
+
+ ret = configure_scale(cont, node, src, tmp, this->h_rsf, this->v_rsf,
+ this->ivmx, &node, this);
+ if (ret < 0)
+ goto error;
+
+ ret = configure_sub_rot(cont, node, tmp, dst, NULL, &node, this);
+ if (ret < 0)
+ goto error;
+
+ *next = node;
+
+ return 0;
+
+error:
+ b2r2_log_info(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rot_upscale() - configures a combined rotate and upscale
+ *
+ * When doing an upscale it is better to do the rotation first.
+ */
+static int configure_rot_upscale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
+{
+	/* TODO: Implement an optimal upscale (rotation first) */
+ return configure_rot_downscale(cont, this, node, next);
+}
+
+/**
+ * configure_rot_scale() - configures a combined rotation and scaling op
+ */
+static int configure_rot_scale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
+{
+ int ret;
+
+	/*
+	 * As in analyze_rot_scale(), the 6.10 fixed point factors multiply
+	 * to 1 << 20 at a total scaling of 1:1.
+	 */
+	bool upscale = (u32)this->h_rsf * (u32)this->v_rsf < (1 << 20);
+
+ if (upscale)
+ ret = configure_rot_upscale(cont, this, node, next);
+ else
+ ret = configure_rot_downscale(cont, this, node, next);
+
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_direct_fill() - configures the given node for direct fill
+ *
+ * @node - the node to configure
+ * @color - the fill color
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * This operation will always consume one node only.
+ */
+static void configure_direct_fill(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ u32 color,
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_FILL | B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_FILL;
+
+ /* Target setup */
+ set_target(node, dst->addr, dst);
+
+ /* Source setup */
+
+	/* It seems B2R2 checks that source and dest have the same format */
+ node->node.GROUP3.B2R2_STY = b2r2_to_native_fmt(dst->fmt);
+ node->node.GROUP2.B2R2_S1CF = color;
+ node->node.GROUP2.B2R2_S2CF = 0;
+
+ /* Consume the node */
+ *next = node->next;
+}
+
+/**
+ * configure_direct_copy() - configures the node for direct copy
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * This operation will always consume one node only.
+ */
+static void configure_direct_copy(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_COPY;
+
+ /* Source setup, use the base function to avoid altering the INS */
+ set_src(&node->node.GROUP3, src->addr, src);
+
+ /* Target setup */
+ set_target(node, dst->addr, dst);
+
+ /* Consume the node */
+ *next = node->next;
+}
+
+/**
+ * configure_fill() - configures the given node for color fill
+ *
+ * @node - the node to configure
+ * @color - the fill color
+ * @fmt - the source color format
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * A normal fill operation can be combined with any other per pixel operations
+ * such as blend.
+ *
+ * This operation will consume as many nodes as are required to write to the
+ * destination format.
+ */
+static int configure_fill(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ u32 color,
+ enum b2r2_blt_fmt fmt,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
+ struct b2r2_node **next)
+{
+ int ret;
+ struct b2r2_node *last;
+
+ /* Configure the destination */
+ ret = configure_dst(cont, node, dst, ivmx, &last);
+ if (ret < 0)
+ goto error;
+
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_COLOR_FILL;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ /* B2R2 has a bug that disables color fill from S2. As a
+ workaround we use S1 for the color. */
+ node->node.GROUP2.B2R2_S1CF = 0;
+ node->node.GROUP2.B2R2_S2CF = color;
+
+ /* TO BE REMOVED: */
+ set_src_2(node, dst->addr, dst);
+ node->node.GROUP4.B2R2_STY = b2r2_to_native_fmt(fmt);
+
+ /* Setup the iVMX for color conversion */
+ if (ivmx != NULL)
+ set_ivmx(node, ivmx);
+
+ if ((dst->type == B2R2_FMT_TYPE_PLANAR) ||
+ (dst->type == B2R2_FMT_TYPE_SEMI_PLANAR)) {
+
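+			/*
+			 * Writing a planar or semi-planar destination
+			 * requires the resizer, even at a 1:1 scale factor,
+			 * to downsample the chroma.
+			 */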
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP8.B2R2_FCTL =
+ B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+ node->node.GROUP9.B2R2_RSF =
+ (1 << (B2R2_RSF_HSRC_INC_SHIFT + 10)) |
+ (1 << (B2R2_RSF_VSRC_INC_SHIFT + 10));
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+
+ node->node.GROUP10.B2R2_RSF =
+ (1 << (B2R2_RSF_HSRC_INC_SHIFT + 10)) |
+ (1 << (B2R2_RSF_VSRC_INC_SHIFT + 10));
+ node->node.GROUP10.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node = node->next;
+
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_copy() - configures the given node for a copy operation
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ *
+ * This operation will consume as many nodes as are required to write to the
+ * destination format.
+ */
+static int configure_copy(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ ret = configure_dst(cont, node, dst, ivmx, &last);
+ if (ret < 0)
+ goto error;
+
+ /* Configure the source for each node */
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ " Internal error! Out of nodes!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ if (this != NULL &&
+ (this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY)
+ != 0) {
+ u32 key_color = 0;
+
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT |
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CKEY_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_KEY;
+
+ key_color = b2r2_to_RGB888(this->flag_param, src->fmt);
+ node->node.GROUP12.B2R2_KEY1 = key_color;
+ node->node.GROUP12.B2R2_KEY2 = key_color;
+ }
+
+ if (this != NULL &&
+ (this->flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
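+			/*
+			 * The node split job is embedded in the blt request,
+			 * which holds the physical address of the CLUT.
+			 */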
+ struct b2r2_blt_request *request =
+ container_of(this, struct b2r2_blt_request,
+ node_split_job);
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CLUTOP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLUT;
+ node->node.GROUP7.B2R2_CCO =
+ B2R2_CCO_CLUT_COLOR_CORRECTION |
+ B2R2_CCO_CLUT_UPDATE;
+ node->node.GROUP7.B2R2_CML = request->clut_phys_addr;
+ }
+ /* Configure the source(s) */
+ configure_src(cont, node, src, ivmx);
+
+ node = node->next;
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rotate() - configures the given node for rotation
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ *
+ * This operation will consume as many nodes as are required by the combination
+ * of rotating and writing the destination format.
+ */
+static int configure_rotate(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ ret = configure_copy(cont, node, src, dst, ivmx, &last, this);
+ if (ret < 0)
+ goto error;
+
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_ROTATION_ENABLED;
+
+ b2r2_log_debug(cont->dev, "%s:\n"
+ "\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n"
+ "\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n"
+ "\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n"
+ "\tB2R2_S3XY: %.8x\tB2R2_S3SZ: %.8x\n"
+ "-----------------------------------\n",
+ __func__, node->node.GROUP1.B2R2_TXY,
+ node->node.GROUP1.B2R2_TSZ,
+ node->node.GROUP3.B2R2_SXY,
+ node->node.GROUP3.B2R2_SSZ,
+ node->node.GROUP4.B2R2_SXY,
+ node->node.GROUP4.B2R2_SSZ,
+ node->node.GROUP5.B2R2_SXY,
+ node->node.GROUP5.B2R2_SSZ);
+
+ node = node->next;
+
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_scale() - configures the given node for scaling
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @h_rsf - the horizontal rescale factor
+ * @v_rsf - the vertical rescale factor
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ */
+static int configure_scale(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ u16 h_rsf, u16 v_rsf,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ struct b2r2_filter_spec *hf = NULL;
+ struct b2r2_filter_spec *vf = NULL;
+
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 rzi = 0;
+ u32 hsrc_init = 0;
+ u32 vsrc_init = 0;
+ u32 hfp = 0;
+ u32 vfp = 0;
+
+ u16 luma_h_rsf = h_rsf;
+ u16 luma_v_rsf = v_rsf;
+
+ struct b2r2_filter_spec *luma_hf = NULL;
+ struct b2r2_filter_spec *luma_vf = NULL;
+
+ u32 luma_fctl = 0;
+ u32 luma_rsf = 0;
+ u32 luma_rzi = 0;
+ u32 luma_hsrc_init = 0;
+ u32 luma_vsrc_init = 0;
+ u32 luma_hfp = 0;
+ u32 luma_vfp = 0;
+
+ s32 src_x;
+ s32 src_y;
+ s32 src_w;
+ s32 src_h;
+
+ bool upsample;
+ bool downsample;
+
+ struct b2r2_blt_rect tmp_win = src->win;
+ bool src_raster = src->type == B2R2_FMT_TYPE_RASTER;
+ bool dst_raster = dst->type == B2R2_FMT_TYPE_RASTER;
+
+ /* Rescale the normalized source window */
+ src_x = inv_rescale(src->win.x - src->rect.x, luma_h_rsf);
+ src_y = inv_rescale(src->win.y - src->rect.y, luma_v_rsf);
+ src_w = inv_rescale(src->win.width, luma_h_rsf);
+ src_h = inv_rescale(src->win.height, luma_v_rsf);
+
+ /* Convert to src coordinate space */
+ src->win.x = (src_x >> 10) + src->rect.x;
+ src->win.y = (src_y >> 10) + src->rect.y;
+
+ /*
+ * Since the stripe might start and end on a fractional pixel
+ * we need to count all the touched pixels in the width.
+ *
+ * Example:
+ * src_x = 1.8, src_w = 2.8
+ *
+ * The stripe touches pixels 1.8 through 4.6, i.e. 4 pixels
+ */
+ src->win.width = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;
+ src->win.height = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10;
+
+ luma_hsrc_init = src_x & 0x3ff;
+ luma_vsrc_init = src_y & 0x3ff;
+
+ /* Check for upsampling of chroma */
+ upsample = !src_raster && !b2r2_is_yuv444_fmt(src->fmt);
+ if (upsample) {
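+		/*
+		 * The chroma planes have half the horizontal resolution
+		 * (and half the vertical for 420), so the chroma resizer
+		 * must step through them at half the rate.
+		 */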
+ h_rsf /= 2;
+
+ if (b2r2_is_yuv420_fmt(src->fmt))
+ v_rsf /= 2;
+ }
+
+ /* Check for downsampling of chroma */
+ downsample = !dst_raster && !b2r2_is_yuv444_fmt(dst->fmt);
+ if (downsample) {
+ h_rsf *= 2;
+
+ if (b2r2_is_yuv420_fmt(dst->fmt))
+ v_rsf *= 2;
+ }
+
+ src_x = inv_rescale(tmp_win.x - src->rect.x, h_rsf);
+ src_y = inv_rescale(tmp_win.y - src->rect.y, v_rsf);
+ hsrc_init = src_x & 0x3ff;
+ vsrc_init = src_y & 0x3ff;
+
+ /* Configure resize and filters */
+ fctl = B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+ luma_fctl = B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf = (h_rsf << B2R2_RSF_HSRC_INC_SHIFT) |
+ (v_rsf << B2R2_RSF_VSRC_INC_SHIFT);
+ luma_rsf = (luma_h_rsf << B2R2_RSF_HSRC_INC_SHIFT) |
+ (luma_v_rsf << B2R2_RSF_VSRC_INC_SHIFT);
+
+ rzi = B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
+ (hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
+ luma_rzi = B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
+ (luma_hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (luma_vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
+
+ /*
+ * We should only filter if there is an actual rescale (i.e. not when
+ * up or downsampling).
+ */
+ if (luma_h_rsf != (1 << 10)) {
+ hf = b2r2_filter_find(h_rsf);
+ luma_hf = b2r2_filter_find(luma_h_rsf);
+ }
+ if (luma_v_rsf != (1 << 10)) {
+ vf = b2r2_filter_find(v_rsf);
+ luma_vf = b2r2_filter_find(luma_v_rsf);
+ }
+
+ if (hf) {
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ hfp = hf->h_coeffs_phys_addr;
+ }
+
+ if (vf) {
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ vfp = vf->v_coeffs_phys_addr;
+ }
+
+ if (luma_hf) {
+ luma_fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER;
+ luma_hfp = luma_hf->h_coeffs_phys_addr;
+ }
+
+ if (luma_vf) {
+ luma_fctl |= B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER;
+ luma_vfp = luma_vf->v_coeffs_phys_addr;
+ }
+
+ ret = configure_copy(cont, node, src, dst, ivmx, &last, this);
+ if (ret < 0)
+ goto error;
+
+	do {
+		bool chroma_rescale =
+			(h_rsf != (1 << 10)) || (v_rsf != (1 << 10));
+		bool luma_rescale =
+			(luma_h_rsf != (1 << 10)) ||
+			(luma_v_rsf != (1 << 10));
+		bool dst_chroma;
+		bool dst_luma;
+
+		if (node == NULL) {
+			b2r2_log_warn(cont->dev, "%s: Internal error! Out "
+				"of nodes!\n", __func__);
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		/* The target type may only be read once node is known valid */
+		dst_chroma = node->node.GROUP1.B2R2_TTY &
+			B2R2_TTY_CHROMA_NOT_LUMA;
+		dst_luma = !dst_chroma;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_FILTER_CONTROL;
+
+ /*
+ * If the source format is anything other than raster, we
+ * always have to enable both chroma and luma resizers. This
+ * could be a bug in the hardware, since it is not mentioned in
+ * the specification.
+ *
+ * Otherwise, we will only enable the chroma resizer when
+ * writing chroma and the luma resizer when writing luma
+ * (or both when writing raster). Also, if there is no rescale
+ * to be done there's no point in using the resizers.
+ */
+
+ if (!src_raster || (chroma_rescale &&
+ (dst_raster || dst_chroma))) {
+ /* Enable chroma resize */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_CHROMA;
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI = rzi;
+ node->node.GROUP9.B2R2_HFP = hfp;
+ node->node.GROUP9.B2R2_VFP = vfp;
+ }
+
+ if (!src_raster || (luma_rescale &&
+ (dst_raster || dst_luma))) {
+ /* Enable luma resize */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_LUMA;
+ node->node.GROUP8.B2R2_FCTL |= luma_fctl;
+
+ node->node.GROUP10.B2R2_RSF = luma_rsf;
+ node->node.GROUP10.B2R2_RZI = luma_rzi;
+ node->node.GROUP10.B2R2_HFP = luma_hfp;
+ node->node.GROUP10.B2R2_VFP = luma_vfp;
+ /*
+ * A scaling operation from raster to a multi-buffer
+ * format requires the raster input to be scaled
+ * before luminance information can be extracted.
+ * Raster input is scaled by the chroma resizer.
+ * The luma resizer only handles luminance data, which
+ * exists in a separate buffer in the source image,
+ * as is the case with YUV planar/semi-planar formats.
+ */
+ if (src_raster) {
+ /* Activate chroma scaling */
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_RESIZE_CHROMA;
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+ /*
+ * Color data must be scaled
+ * to the same size as luma.
+ * Use luma scaling parameters.
+ */
+ node->node.GROUP9.B2R2_RSF = luma_rsf;
+ node->node.GROUP9.B2R2_RZI = luma_rzi;
+ node->node.GROUP9.B2R2_HFP = luma_hfp;
+ node->node.GROUP9.B2R2_VFP = luma_vfp;
+ }
+ }
+
+ b2r2_log_info(cont->dev, "%s:\n"
+ "\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n"
+ "\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n"
+ "\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n"
+ "\tB2R2_S3XY: %.8x\tB2R2_S3SZ: %.8x\n"
+ "----------------------------------\n",
+ __func__, node->node.GROUP1.B2R2_TXY,
+ node->node.GROUP1.B2R2_TSZ,
+ node->node.GROUP3.B2R2_SXY,
+ node->node.GROUP3.B2R2_SSZ,
+ node->node.GROUP4.B2R2_SXY,
+ node->node.GROUP4.B2R2_SSZ,
+ node->node.GROUP5.B2R2_SXY,
+ node->node.GROUP5.B2R2_SSZ);
+
+ node = node->next;
+
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_src() - configures the source registers and the iVMX
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @ivmx - the iVMX to use for color conversion
+ *
+ * This operation will not consume any nodes
+ */
+static void configure_src(struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src, const u32 *ivmx)
+{
+ struct b2r2_node_split_buf tmp_buf;
+
+ b2r2_log_info(cont->dev,
+ "%s: src.win=(%d, %d, %d, %d)\n", __func__,
+ src->win.x, src->win.y, src->win.width,
+ src->win.height);
+
+ /* Configure S1 - S3 */
+ switch (src->type) {
+ case B2R2_FMT_TYPE_RASTER:
+ set_src_2(node, src->addr, src);
+ break;
+ case B2R2_FMT_TYPE_SEMI_PLANAR:
+ memcpy(&tmp_buf, src, sizeof(tmp_buf));
+
+ /*
+ * For 420 and 422 the chroma has lower resolution than the
+ * luma
+ */
+ if (!b2r2_is_yuv444_fmt(src->fmt)) {
+ tmp_buf.win.x >>= 1;
+ tmp_buf.win.width = (tmp_buf.win.width + 1) / 2;
+
+ if (b2r2_is_yuv420_fmt(src->fmt)) {
+ tmp_buf.win.height =
+ (tmp_buf.win.height + 1) / 2;
+ tmp_buf.win.y >>= 1;
+ }
+ }
+
+ set_src_3(node, src->addr, src);
+ set_src_2(node, tmp_buf.chroma_addr, &tmp_buf);
+ break;
+ case B2R2_FMT_TYPE_PLANAR:
+ memcpy(&tmp_buf, src, sizeof(tmp_buf));
+
+ if (!b2r2_is_yuv444_fmt(src->fmt)) {
+ /*
+ * Each chroma buffer will have half as many values
+ * per line as the luma buffer
+ */
+ tmp_buf.pitch = (tmp_buf.pitch + 1) / 2;
+
+ /* Horizontal resolution is half */
+ tmp_buf.win.x >>= 1;
+ tmp_buf.win.width = (tmp_buf.win.width + 1) / 2;
+
+ /*
+ * If the buffer is in YUV420 format, the vertical
+ * resolution is half as well
+ */
+ if (b2r2_is_yuv420_fmt(src->fmt)) {
+ tmp_buf.win.height =
+ (tmp_buf.win.height + 1) / 2;
+ tmp_buf.win.y >>= 1;
+ }
+ }
+
+ set_src_3(node, src->addr, src); /* Y */
+ set_src_2(node, tmp_buf.chroma_addr, &tmp_buf); /* U */
+ set_src_1(node, tmp_buf.chroma_cr_addr, &tmp_buf); /* V */
+
+ break;
+ default:
+ /* Should never, ever happen */
+ BUG_ON(1);
+ break;
+ }
+
+ /* Configure the iVMX for color space conversions */
+ if (ivmx != NULL)
+ set_ivmx(node, ivmx);
+}
+
+/**
+ * configure_bg() - configures a background for the given node
+ *
+ * @node - the node to configure
+ * @bg - the background buffer
+ * @swap_fg_bg - if true, fg will be on s1 instead of s2
+ *
+ * This operation will not consume any nodes.
+ *
+ * NOTE: This method should be called _AFTER_ the destination has been
+ * configured.
+ *
+ * WARNING: Take care when using this with semi-planar or planar sources since
+ * either S1 or S2 will be overwritten!
+ */
+static void configure_bg(struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *bg, bool swap_fg_bg)
+{
+ b2r2_log_info(cont->dev,
+ "%s: bg.win=(%d, %d, %d, %d)\n", __func__,
+ bg->win.x, bg->win.y, bg->win.width,
+ bg->win.height);
+
+ /* Configure S1 */
+ switch (bg->type) {
+ case B2R2_FMT_TYPE_RASTER:
+ if (swap_fg_bg) {
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_SWAP_FG_BG;
+
+ set_src(&node->node.GROUP4, bg->addr, bg);
+ } else {
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+
+ set_src(&node->node.GROUP3, bg->addr, bg);
+ }
+ break;
+ default:
+ /* Should never, ever happen */
+ BUG_ON(1);
+ break;
+ }
+}
+
+/**
+ * configure_dst() - configures the destination registers of the given node
+ *
+ * @node - the node to configure
+ * @dst - the destination buffer
+ * @ivmx - the iVMX to use for color conversion
+ *
+ * This operation will consume as many nodes as are required to write the
+ * destination format.
+ */
+static int configure_dst(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next)
+{
+ int ret;
+ int nbr_planes = 1;
+ int i;
+
+ struct b2r2_node_split_buf dst_planes[3];
+
+ b2r2_log_info(cont->dev,
+ "%s: dst.win=(%d, %d, %d, %d)\n", __func__,
+ dst->win.x, dst->win.y, dst->win.width,
+ dst->win.height);
+
+ memcpy(&dst_planes[0], dst, sizeof(dst_planes[0]));
+
+ if (dst->type != B2R2_FMT_TYPE_RASTER) {
+ /* There will be at least 2 planes */
+ nbr_planes = 2;
+
+ memcpy(&dst_planes[1], dst, sizeof(dst_planes[1]));
+
+ dst_planes[1].addr = dst->chroma_addr;
+ dst_planes[1].plane_selection = B2R2_TTY_CHROMA_NOT_LUMA;
+
+ if (!b2r2_is_yuv444_fmt(dst->fmt)) {
+ /* Horizontal resolution is half */
+ dst_planes[1].win.x /= 2;
+ /*
+ * Must round up the chroma size to handle cases when the
+ * luma size is not divisible by 2. E.g. luma width==7
+ * requires chroma width==4; chroma width==7/2==3 is only
+ * enough for luma width==6.
+ */
+ dst_planes[1].win.width =
+ (dst_planes[1].win.width + 1) / 2;
+
+ /*
+ * If the buffer is in YUV420 format, the vertical
+ * resolution is half as well. Height must be rounded in
+ * the same way as is done for width.
+ */
+ if (b2r2_is_yuv420_fmt(dst->fmt)) {
+ dst_planes[1].win.y /= 2;
+ dst_planes[1].win.height =
+ (dst_planes[1].win.height + 1) / 2;
+ }
+ }
+
+ if (dst->type == B2R2_FMT_TYPE_PLANAR) {
+ /* There will be a third plane as well */
+ nbr_planes = 3;
+
+ if (!b2r2_is_yuv444_fmt(dst->fmt)) {
+ /* The chroma planes have half the luma pitch */
+ dst_planes[1].pitch /= 2;
+ }
+
+ memcpy(&dst_planes[2], &dst_planes[1],
+ sizeof(dst_planes[2]));
+ dst_planes[2].addr = dst->chroma_cr_addr;
+
+ /*
+ * The third plane will be Cr.
+ * The flag B2R2_TTY_CB_NOT_CR actually works
+ * the other way around, i.e. as if it was
+ * B2R2_TTY_CR_NOT_CB.
+ */
+ dst_planes[2].chroma_selection = B2R2_TTY_CB_NOT_CR;
+ }
+ }
+
+ /* Configure one node for each plane */
+ for (i = 0; i < nbr_planes; i++) {
+
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /*
+ * When writing chroma, there's no need to read the luma and
+ * vice versa.
+ */
+ if ((node->node.GROUP3.B2R2_STY & B2R2_NATIVE_YUV) &&
+ (nbr_planes > 1)) {
+ if (i != 0) {
+ node->node.GROUP4.B2R2_STY |=
+ B2R2_S3TY_ENABLE_BLANK_ACCESS;
+ }
+ if (i != 1) {
+ node->node.GROUP0.B2R2_INS &=
+ ~B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ }
+ if (i != 2) {
+ node->node.GROUP0.B2R2_INS &=
+ ~B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_COLOR_FILL_REGISTER;
+ }
+ } else if ((node->node.GROUP3.B2R2_STY &
+ (B2R2_NATIVE_YCBCR42X_MBN |
+ B2R2_NATIVE_YCBCR42X_R2B)) &&
+ (nbr_planes > 1)) {
+ if (i != 0) {
+ node->node.GROUP4.B2R2_STY |=
+ B2R2_S3TY_ENABLE_BLANK_ACCESS;
+ }
+ }
+
+ set_target(node, dst_planes[i].addr, &dst_planes[i]);
+
+ node = node->next;
+ }
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_blend() - configures the given node for alpha blending
+ *
+ * @node - the node to configure
+ * @flags - the flags passed in the blt_request
+ * @global_alpha - the global alpha to use (if enabled in flags)
+ *
+ * This operation will not consume any nodes.
+ *
+ * NOTE: This method should be called _AFTER_ the destination has been
+ * configured.
+ *
+ * WARNING: Take care when using this with semi-planar or planar sources since
+ * either S1 or S2 will be overwritten!
+ */
+static void configure_blend(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 flags, u32 global_alpha)
+{
+ node->node.GROUP0.B2R2_ACK &= ~(B2R2_ACK_MODE_BYPASS_S2_S3);
+
+ /* Check if the foreground is premultiplied */
+ if ((flags & B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT) != 0)
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BLEND_NOT_PREMULT;
+ else
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BLEND_PREMULT;
+
+ /* Check if global alpha blend should be enabled */
+ if (flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) {
+
+ /* B2R2 expects the global alpha to be in 0...128 range */
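+ /*
+ * E.g. 255 maps to 128, 128 maps to 64 and 0 maps to 0
+ * (the integer division truncates)
+ */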
+ global_alpha = (global_alpha*128)/255;
+
+ node->node.GROUP0.B2R2_ACK |=
+ global_alpha << B2R2_ACK_GALPHA_ROPID_SHIFT;
+ } else {
+ node->node.GROUP0.B2R2_ACK |=
+ (128 << B2R2_ACK_GALPHA_ROPID_SHIFT);
+ }
+}
+
+/**
+ * configure_clip() - configures destination clipping for the given node
+ *
+ * @node - the node to configure
+ * @clip_rect - the clip rectangle
+ *
+ * This operation does not consume any nodes.
+ */
+static void configure_clip(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_blt_rect *clip_rect)
+{
+ s32 l = clip_rect->x;
+ s32 r = clip_rect->x + clip_rect->width - 1;
+ s32 t = clip_rect->y;
+ s32 b = clip_rect->y + clip_rect->height - 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLIP_WINDOW;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RECT_CLIP_ENABLED;
+
+ /*
+ * Clip window setup: CWO holds the top-left corner and CWS the
+ * bottom-right corner, both inclusive
+ */
+ node->node.GROUP6.B2R2_CWO =
+ ((t & 0x7FFF) << B2R2_CWO_Y_SHIFT) |
+ ((l & 0x7FFF) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((b & 0x7FFF) << B2R2_CWS_Y_SHIFT) |
+ ((r & 0x7FFF) << B2R2_CWS_X_SHIFT);
+}
+
+/**
+ * set_buf() - configures the given buffer with the provided values
+ *
+ * @addr - the physical base address
+ * @img - the blt image to base the buffer on
+ * @rect - the rectangle to use
+ * @color_fill - determines whether the buffer should be used for color fill
+ * @color - the color to use in case of color fill
+ */
+static void set_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *buf,
+ u32 addr,
+ const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect,
+ bool color_fill,
+ u32 color)
+{
+ memset(buf, 0, sizeof(*buf));
+
+ buf->fmt = img->fmt;
+ buf->type = b2r2_get_fmt_type(img->fmt);
+
+ if (color_fill) {
+ buf->type = B2R2_FMT_TYPE_RASTER;
+ buf->color = color;
+ } else {
+ buf->addr = addr;
+
+ buf->alpha_range = b2r2_get_alpha_range(img->fmt);
+
+ if (img->pitch == 0)
+ buf->pitch = b2r2_fmt_byte_pitch(img->fmt, img->width);
+ else
+ buf->pitch = img->pitch;
+
+ buf->height = img->height;
+ buf->width = img->width;
+
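+ /*
+ * Derive the chroma plane addresses from the buffer layout: the
+ * Y plane (pitch * height bytes) is followed by an interleaved
+ * CbCr plane for semi-planar formats, or by separate Cb and Cr
+ * planes (Cr first for YVU formats) at half the luma pitch for
+ * planar formats.
+ */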
+ switch (buf->type) {
+ case B2R2_FMT_TYPE_SEMI_PLANAR:
+ buf->chroma_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ break;
+ case B2R2_FMT_TYPE_PLANAR:
+ if (b2r2_is_yuv422_fmt(buf->fmt) ||
+ b2r2_is_yuv420_fmt(buf->fmt)) {
+ buf->chroma_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ } else {
+ buf->chroma_cr_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ }
+ if (b2r2_is_yuv420_fmt(buf->fmt)) {
+ /*
+ * Use ceil(height/2) in case
+ * buffer height is not divisible by 2.
+ */
+ buf->chroma_cr_addr =
+ (u32)(((u8 *)buf->chroma_addr) +
+ (buf->pitch >> 1) *
+ ((buf->height + 1) >> 1));
+ } else if (b2r2_is_yuv422_fmt(buf->fmt)) {
+ buf->chroma_cr_addr =
+ (u32)(((u8 *)buf->chroma_addr) +
+ (buf->pitch >> 1) * buf->height);
+ } else if (b2r2_is_yvu420_fmt(buf->fmt)) {
+ buf->chroma_addr =
+ (u32)(((u8 *)buf->chroma_cr_addr) +
+ (buf->pitch >> 1) *
+ ((buf->height + 1) >> 1));
+ } else if (b2r2_is_yvu422_fmt(buf->fmt)) {
+ buf->chroma_addr =
+ (u32)(((u8 *)buf->chroma_cr_addr) +
+ (buf->pitch >> 1) * buf->height);
+ }
+ break;
+ default:
+ break;
+ }
+
+ memcpy(&buf->rect, rect, sizeof(buf->rect));
+ }
+}
+
+/**
+ * setup_tmp_buf() - configure a temporary buffer
+ */
+static int setup_tmp_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *tmp,
+ u32 max_size,
+ enum b2r2_blt_fmt pref_fmt,
+ u32 pref_width,
+ u32 pref_height)
+{
+ int ret;
+
+ enum b2r2_blt_fmt fmt;
+
+ u32 width;
+ u32 height;
+ u32 pitch;
+ u32 size;
+
+ /* Determine what format we should use for the tmp buf */
+ if (b2r2_is_rgb_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else if (b2r2_is_bgr_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_ABGR8888;
+ } else if (b2r2_is_yvu_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_CB_Y_CR_Y;
+ } else if (b2r2_is_yuv_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ } else {
+ /* Wait, what? */
+ b2r2_log_warn(cont->dev, "%s: "
+ "Cannot create tmp buf from this fmt (%d)\n",
+ __func__, pref_fmt);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* See if we can fit the entire preferred rectangle */
+ width = pref_width;
+ height = pref_height;
+ pitch = b2r2_fmt_byte_pitch(fmt, width);
+ size = pitch * height;
+
+ if (size > max_size) {
+ /* We need to limit the size, so we choose a different width */
+ width = min(width, (u32) B2R2_RESCALE_MAX_WIDTH);
+ pitch = b2r2_fmt_byte_pitch(fmt, width);
+ height = min(height, max_size / pitch);
+ size = pitch * height;
+ }
+
+ /* We should at least have enough room for one scanline */
+ if (height == 0) {
+ b2r2_log_warn(cont->dev, "%s: Not enough tmp mem!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memset(tmp, 0, sizeof(*tmp));
+
+ tmp->fmt = fmt;
+ tmp->type = B2R2_FMT_TYPE_RASTER;
+ tmp->height = height;
+ tmp->width = width;
+ tmp->pitch = pitch;
+
+ tmp->rect.width = width;
+ tmp->rect.height = tmp->height;
+ tmp->alpha_range = B2R2_TY_ALPHA_RANGE_255;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * is_transform() - returns whether the given request is a transform operation
+ */
+static bool is_transform(const struct b2r2_blt_request *req)
+{
+ return (req->user_req.transform != B2R2_BLT_TRANSFORM_NONE) ||
+ (req->user_req.src_rect.width !=
+ req->user_req.dst_rect.width) ||
+ (req->user_req.src_rect.height !=
+ req->user_req.dst_rect.height);
+}
+
+/**
+ * rescale() - rescales the given dimension
+ *
+ * Returns the rescaled dimension in 22.10 fixed point format.
+ */
+static s32 rescale(struct b2r2_control *cont, s32 dim, u16 sf)
+{
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ if (sf == 0) {
+ b2r2_log_err(cont->dev, "%s: Scale factor is 0!\n", __func__);
+ BUG_ON(1);
+ }
+
+ /*
+ * This is normally not safe to do, since it drastically decreases the
+ * precision of the integer part of the dimension. But since the B2R2
+ * hardware only has 12-bit registers for these values, we are safe.
+ */
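+ /* E.g. dim == 100, sf == 1 << 10 (1.0): (100 << 20) / 1024 == 100 << 10 */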
+ return (dim << 20) / sf;
+}
+
+/**
+ * inv_rescale() - does an inverted rescale of the given dimension
+ *
+ * Returns the rescaled dimension in 22.10 fixed point format, i.e. the
+ * integer dimension multiplied by the 6.10 fixed point scale factor.
+ */
+static s32 inv_rescale(s32 dim, u16 sf)
+{
+ if (sf == 0)
+ return dim;
+
+ return dim * sf;
+}
+
+/**
+ * set_target() - sets the target registers of the given node
+ */
+static void set_target(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ s32 l;
+ s32 r;
+ s32 t;
+ s32 b;
+
+ if (buf->tmp_buf_index)
+ node->dst_tmp_index = buf->tmp_buf_index;
+
+ node->node.GROUP1.B2R2_TBA = addr;
+ node->node.GROUP1.B2R2_TTY = buf->pitch | b2r2_to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->chroma_selection | buf->hso |
+ buf->vso | buf->dither | buf->plane_selection;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ node->node.GROUP1.B2R2_TTY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((buf->win.width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((buf->win.height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY =
+ ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+
+ /* Check if the rectangle is outside the buffer */
+ if (buf->vso == B2R2_TY_VSO_BOTTOM_TO_TOP)
+ t = buf->win.y - (buf->win.height - 1);
+ else
+ t = buf->win.y;
+
+ if (buf->hso == B2R2_TY_HSO_RIGHT_TO_LEFT)
+ l = buf->win.x - (buf->win.width - 1);
+ else
+ l = buf->win.x;
+
+ r = l + buf->win.width;
+ b = t + buf->win.height;
+
+ /* Clip to the destination buffer to prevent memory overwrites */
+ if ((l < 0) || (r > buf->width) || (t < 0) || (b > buf->height)) {
+ /* The clip rectangle is including the borders */
+ l = max(l, 0);
+ r = min(r, (s32) buf->width) - 1;
+ t = max(t, 0);
+ b = min(b, (s32) buf->height) - 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLIP_WINDOW;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RECT_CLIP_ENABLED;
+ node->node.GROUP6.B2R2_CWO =
+ ((l & 0x7FFF) << B2R2_CWO_X_SHIFT) |
+ ((t & 0x7FFF) << B2R2_CWO_Y_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((r & 0x7FFF) << B2R2_CWS_X_SHIFT) |
+ ((b & 0x7FFF) << B2R2_CWS_Y_SHIFT);
+ }
+}
+
+/**
+ * set_src() - configures the given source register with the given values
+ */
+static void set_src(struct b2r2_src_config *src, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ src->B2R2_SBA = addr;
+ src->B2R2_STY = buf->pitch | b2r2_to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->hso | buf->vso;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ src->B2R2_STY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ src->B2R2_SSZ = ((buf->win.width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((buf->win.height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ src->B2R2_SXY = ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+}
+
+/**
+ * set_src_1() - sets the source 1 registers of the given node
+ */
+static void set_src_1(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+
+ node->node.GROUP3.B2R2_SBA = addr;
+ node->node.GROUP3.B2R2_STY = buf->pitch | b2r2_to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->hso | buf->vso;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ node->node.GROUP3.B2R2_STY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ node->node.GROUP3.B2R2_SXY =
+ ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+
+ /* Source 1 has no size register */
+}
+
+/**
+ * set_src_2() - sets the source 2 registers of the given node
+ */
+static void set_src_2(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 2;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+
+ set_src(&node->node.GROUP4, addr, buf);
+}
+
+/**
+ * set_src_3() - sets the source 3 registers of the given node
+ */
+static void set_src_3(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 3;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_3;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+
+ set_src(&node->node.GROUP5, addr, buf);
+}
+
+/**
+ * set_ivmx() - configures the iVMX registers with the given values
+ */
+static void set_ivmx(struct b2r2_node *node, const u32 *vmx_values)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+
+ node->node.GROUP15.B2R2_VMX0 = vmx_values[0];
+ node->node.GROUP15.B2R2_VMX1 = vmx_values[1];
+ node->node.GROUP15.B2R2_VMX2 = vmx_values[2];
+ node->node.GROUP15.B2R2_VMX3 = vmx_values[3];
+}
+
+/**
+ * reset_nodes() - clears the node list
+ */
+static void reset_nodes(struct b2r2_node *node)
+{
+ while (node != NULL) {
+ memset(&node->node, 0, sizeof(node->node));
+
+ node->src_tmp_index = 0;
+ node->dst_tmp_index = 0;
+
+ /* TODO: Implement support for short linked lists */
+ node->node.GROUP0.B2R2_CIC = 0x7ffff;
+
+ if (node->next != NULL)
+ node->node.GROUP0.B2R2_NIP =
+ node->next->physical_address;
+ node = node->next;
+ }
+}
+
+int b2r2_node_split_init(struct b2r2_control *cont)
+{
+ return 0;
+}
+
+void b2r2_node_split_exit(struct b2r2_control *cont)
+{
+}
diff --git a/drivers/video/b2r2/b2r2_node_split.h b/drivers/video/b2r2/b2r2_node_split.h
new file mode 100644
index 00000000000..a577241c31b
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_split.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node splitter
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_NODE_SPLIT_H_
+#define __B2R2_NODE_SPLIT_H_
+
+#include "b2r2_internal.h"
+#include "b2r2_hw.h"
+
+/**
+ * b2r2_node_split_analyze() - Analyzes a B2R2 request
+ *
+ * @req - The request to analyze
+ * @max_buf_size - The largest size allowed for intermediate buffers
+ * @node_count - Number of nodes required for the job
+ * @buf_count - Number of intermediate buffers required for the job
+ * @bufs - Array describing the required intermediate buffers
+ *
+ * Analyzes the request and determines how many nodes and intermediate buffers
+ * are required.
+ *
+ * It is the responsibility of the caller to allocate memory and assign the
+ * physical addresses. After that b2r2_node_split_assign_buffers should be
+ * called to assign the buffers to the right nodes.
+ *
+ * Returns:
+ * 0 if successful, a negative value otherwise. The analyzed request is
+ * returned through @job.
+ */
+int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
+ u32 max_buf_size, u32 *node_count, struct b2r2_work_buf **bufs,
+ u32 *buf_count,
+ struct b2r2_node_split_job *job);
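+
+/*
+ * Typical call sequence (a sketch; error handling and the allocation of
+ * nodes and work buffer memory, which is the caller's responsibility,
+ * are omitted):
+ *
+ *	b2r2_node_split_analyze(req, max_buf_size, &node_count, &bufs,
+ *			&buf_count, &job);
+ *	b2r2_node_split_configure(cont, &job, first);
+ *	b2r2_node_split_assign_buffers(cont, &job, first, bufs, buf_count);
+ *	... run the job ...
+ *	b2r2_node_split_cancel(cont, &job);
+ */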
+
+/**
+ * b2r2_node_split_configure() - Performs a node split
+ *
+ * @job - The analyzed request
+ * @first - The first node in the list of nodes to use
+ *
+ * Fills the supplied list of nodes with the parameters acquired by analyzing
+ * the request.
+ *
+ * All intermediate buffers are referenced by their indices in the array
+ * returned by b2r2_node_split_analyze.
+ *
+ * Returns:
+ * A negative value if an error occurred, 0 otherwise.
+ */
+int b2r2_node_split_configure(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job, struct b2r2_node *first);
+
+/**
+ * b2r2_node_split_assign_buffers() - Assigns physical addresses
+ *
+ * @job - The job
+ * @first - The first node in the node list
+ * @bufs - Buffers with assigned physical addresses
+ * @buf_count - Number of physical addresses
+ *
+ * Assigns the physical addresses where intermediate buffers are required in
+ * the node list.
+ *
+ * The order of the elements of 'bufs' must be maintained from the call to
+ * b2r2_node_split_analyze.
+ *
+ * Returns:
+ * A negative value if an error occurred, 0 otherwise.
+ */
+int b2r2_node_split_assign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job,
+ struct b2r2_node *first, struct b2r2_work_buf *bufs,
+ u32 buf_count);
+
+/**
+ * b2r2_node_split_unassign_buffers() - Removes all physical addresses
+ *
+ * @job - The job
+ * @first - The first node in the node list
+ *
+ * Removes all references to intermediate buffers from the node list.
+ *
+ * This makes it possible to reuse the node list with new buffers by calling
+ * b2r2_node_split_assign_buffers again. Useful for caching node lists.
+ */
+void b2r2_node_split_unassign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job,
+ struct b2r2_node *first);
+
+/**
+ * b2r2_node_split_cancel() - Releases all resources for a job
+ *
+ * @job - The job whose resources should be released
+ *
+ * Releases all resources associated with a job.
+ *
+ * This should always be called once b2r2_node_split_analyze has been called
+ * in order to release any resources allocated while analyzing.
+ */
+void b2r2_node_split_cancel(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job);
+
+/**
+ * b2r2_node_split_init() - Initializes the node split module
+ *
+ * Initializes the node split module and creates debugfs files.
+ */
+int b2r2_node_split_init(struct b2r2_control *cont);
+
+/**
+ * b2r2_node_split_exit() - Deinitializes the node split module
+ *
+ * Releases all resources for the node split module.
+ */
+void b2r2_node_split_exit(struct b2r2_control *cont);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_profiler/Makefile b/drivers/video/b2r2/b2r2_profiler/Makefile
new file mode 100644
index 00000000000..69a85524fd7
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler/Makefile
@@ -0,0 +1,3 @@
+# Make file for loadable module B2R2 Profiler
+
+obj-$(CONFIG_B2R2_PROFILER) += b2r2_profiler.o
diff --git a/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c
new file mode 100644
index 00000000000..e038941b4e8
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson B2R2 profiler implementation
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+
+#include <video/b2r2_blt.h>
+#include "../b2r2_profiler_api.h"
+
+
+#define S32_MAX 2147483647
+
+
+static bool src_format_filter_on = false;
+module_param(src_format_filter_on, bool, S_IRUGO | S_IWUSR);
+static unsigned int src_format_filter;
+module_param(src_format_filter, uint, S_IRUGO | S_IWUSR);
+
+static bool print_blts_on = false;
+module_param(print_blts_on, bool, S_IRUGO | S_IWUSR);
+static bool use_mpix_per_second_in_print_blts = true;
+module_param(use_mpix_per_second_in_print_blts, bool, S_IRUGO | S_IWUSR);
+
+static bool profiler_stats_on = true;
+module_param(profiler_stats_on, bool, S_IRUGO | S_IWUSR);
+
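+/* Number of blits to aggregate before the statistics are printed and reset */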
+static const unsigned int profiler_stats_blts_used = 400;
+static struct {
+ unsigned long sampling_start_time_jiffies;
+
+ s32 min_mpix_per_second;
+ struct b2r2_blt_req min_blt_request;
+ struct b2r2_blt_profiling_info min_blt_profiling_info;
+
+ s32 max_mpix_per_second;
+ struct b2r2_blt_req max_blt_request;
+ struct b2r2_blt_profiling_info max_blt_profiling_info;
+
+ s32 accumulated_num_pixels;
+ s32 accumulated_num_usecs;
+
+ u32 num_blts_done;
+} profiler_stats;
+
+
+static s32 nsec_2_usec(const s32 nsec);
+
+static int is_scale_blt(const struct b2r2_blt_req * const request);
+static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+static void print_blt(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request);
+static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs);
+static void print_profiler_stats(void);
+static void reset_profiler_stats(void);
+static void do_profiler_stats(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+static void blt_done(const struct b2r2_blt_req * const blt,
+ const s32 request_id,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+
+static struct b2r2_profiler this = {
+ .blt_done = blt_done,
+};
+
+
+static s32 nsec_2_usec(const s32 nsec)
+{
+ return nsec / 1000;
+}
+
+
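+/*
+ * A blit counts as a scale blit when source and destination dimensions
+ * differ; with a 90 degree rotation the source width maps onto the
+ * destination height and vice versa, so the comparison is swapped.
+ */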
+static int is_scale_blt(const struct b2r2_blt_req * const request)
+{
+ if ((request->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90 &&
+ (request->src_rect.width !=
+ request->dst_rect.height ||
+ request->src_rect.height !=
+ request->dst_rect.width)) ||
+ (!(request->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) &&
+ (request->src_rect.width !=
+ request->dst_rect.width ||
+ request->src_rect.height !=
+ request->dst_rect.height)))
+ return 1;
+ else
+ return 0;
+}
+
+static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ return get_mpix_per_second(get_num_pixels_in_blt(request),
+ nsec_2_usec(blt_profiling_info->nsec_active_in_cpu +
+ blt_profiling_info->nsec_active_in_b2r2));
+}
+
+static void print_blt(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ char tmp_str[128];
+ snprintf(tmp_str, sizeof(tmp_str),
+ "SF: %#10x, DF: %#10x, F: %#10x, T: %#3x, S: %1i, P: %7i",
+ request->src_img.fmt,
+ request->dst_img.fmt,
+ request->flags,
+ request->transform,
+ is_scale_blt(request),
+ get_num_pixels_in_blt(request));
+ if (use_mpix_per_second_in_print_blts)
+ printk(KERN_ALERT "%s, MPix/s: %3i\n", tmp_str,
+ get_blt_mpix_per_second(request, blt_profiling_info));
+ else
+ printk(KERN_ALERT "%s, CPU: %10i, B2R2: %10i, Tot: %10i ns\n",
+ tmp_str, blt_profiling_info->nsec_active_in_cpu,
+ blt_profiling_info->nsec_active_in_b2r2,
+ blt_profiling_info->total_time_nsec);
+}
+
+
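+/*
+ * For fill operations only the destination is written, so its size is
+ * used; otherwise the average of the source and destination sizes serves
+ * as a rough measure of the amount of data a blit touches.
+ */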
+static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request)
+{
+ s32 num_pixels_in_src =
+ request->src_rect.width * request->src_rect.height;
+ s32 num_pixels_in_dst =
+ request->dst_rect.width * request->dst_rect.height;
+ if (request->flags & (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW))
+ return num_pixels_in_dst;
+ else
+ return (num_pixels_in_src + num_pixels_in_dst) / 2;
+}
+
+static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs)
+{
+ s32 num_pixels_scale_factor = num_pixels != 0 ?
+ S32_MAX / num_pixels : S32_MAX;
+ s32 num_usecs_scale_factor = num_usecs != 0 ?
+ S32_MAX / num_usecs : S32_MAX;
+ s32 scale_factor = min(num_pixels_scale_factor, num_usecs_scale_factor);
+
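+ /*
+ * Both values are scaled by the same factor, as large as possible
+ * without s32 overflow, so that the divisions by 10^6 below retain
+ * precision; one pixel per microsecond equals one MPix/s.
+ */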
+ s32 num_pixels_scaled = num_pixels * scale_factor;
+ s32 num_usecs_scaled = num_usecs * scale_factor;
+
+ if (num_usecs_scaled < 1000000)
+ return 0;
+
+ return (num_pixels_scaled / 1000000) / (num_usecs_scaled / 1000000);
+}
+
+static void print_profiler_stats(void)
+{
+ printk(KERN_ALERT "Min: %3i, Avg: %3i, Max: %3i MPix/s\n",
+ profiler_stats.min_mpix_per_second,
+ get_mpix_per_second(
+ profiler_stats.accumulated_num_pixels,
+ profiler_stats.accumulated_num_usecs),
+ profiler_stats.max_mpix_per_second);
+ printk(KERN_ALERT "Min blit:\n");
+ print_blt(&profiler_stats.min_blt_request,
+ &profiler_stats.min_blt_profiling_info);
+ printk(KERN_ALERT "Max blit:\n");
+ print_blt(&profiler_stats.max_blt_request,
+ &profiler_stats.max_blt_profiling_info);
+}
+
+static void reset_profiler_stats(void)
+{
+ profiler_stats.sampling_start_time_jiffies = jiffies;
+ profiler_stats.min_mpix_per_second = S32_MAX;
+ profiler_stats.max_mpix_per_second = 0;
+ profiler_stats.accumulated_num_pixels = 0;
+ profiler_stats.accumulated_num_usecs = 0;
+ profiler_stats.num_blts_done = 0;
+}
+
+static void do_profiler_stats(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ s32 num_pixels_in_blt;
+ s32 num_usec_blt_took;
+ s32 blt_mpix_per_second;
+
+ if (time_before(jiffies, profiler_stats.sampling_start_time_jiffies))
+ return;
+
+ num_pixels_in_blt = get_num_pixels_in_blt(request);
+ num_usec_blt_took = nsec_2_usec(blt_profiling_info->nsec_active_in_cpu +
+ blt_profiling_info->nsec_active_in_b2r2);
+ blt_mpix_per_second = get_mpix_per_second(num_pixels_in_blt,
+ num_usec_blt_took);
+
+ if (blt_mpix_per_second <=
+ profiler_stats.min_mpix_per_second) {
+ profiler_stats.min_mpix_per_second = blt_mpix_per_second;
+ memcpy(&profiler_stats.min_blt_request,
+ request, sizeof(struct b2r2_blt_req));
+ memcpy(&profiler_stats.min_blt_profiling_info,
+ blt_profiling_info,
+ sizeof(struct b2r2_blt_profiling_info));
+ }
+
+ if (blt_mpix_per_second >= profiler_stats.max_mpix_per_second) {
+ profiler_stats.max_mpix_per_second = blt_mpix_per_second;
+ memcpy(&profiler_stats.max_blt_request, request,
+ sizeof(struct b2r2_blt_req));
+ memcpy(&profiler_stats.max_blt_profiling_info,
+ blt_profiling_info, sizeof(struct b2r2_blt_profiling_info));
+ }
+
+ profiler_stats.accumulated_num_pixels += num_pixels_in_blt;
+ profiler_stats.accumulated_num_usecs += num_usec_blt_took;
+ profiler_stats.num_blts_done++;
+
+ if (profiler_stats.num_blts_done >= profiler_stats_blts_used) {
+ print_profiler_stats();
+ reset_profiler_stats();
+ /*
+ * The printouts initiated above can disturb the next
+ * measurement, so we delay it two seconds to give the
+ * printouts a chance to finish.
+ */
+ profiler_stats.sampling_start_time_jiffies = jiffies + (2 * HZ);
+ }
+}
+
+static void blt_done(const struct b2r2_blt_req * const request,
+ const s32 request_id,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ /* Filters */
+ if (src_format_filter_on && request->src_img.fmt != src_format_filter)
+ return;
+
+ /* Processors */
+ if (print_blts_on)
+ print_blt(request, blt_profiling_info);
+
+ if (profiler_stats_on)
+ do_profiler_stats(request, blt_profiling_info);
+}
+
+
+static int __init b2r2_profiler_init(void)
+{
+ reset_profiler_stats();
+
+ return b2r2_register_profiler(&this);
+}
+module_init(b2r2_profiler_init);
+
+static void __exit b2r2_profiler_exit(void)
+{
+ b2r2_unregister_profiler(&this);
+}
+module_exit(b2r2_profiler_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Johan Mossberg (johan.xx.mossberg@stericsson.com)");
+MODULE_DESCRIPTION("B2R2 Profiler");
diff --git a/drivers/video/b2r2/b2r2_profiler_api.h b/drivers/video/b2r2/b2r2_profiler_api.h
new file mode 100644
index 00000000000..5f1f9abbe49
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_api.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiling API
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#ifndef _LINUX_VIDEO_B2R2_PROFILER_API_H
+#define _LINUX_VIDEO_B2R2_PROFILER_API_H
+
+#include <video/b2r2_blt.h>
+
+/**
+ * struct b2r2_blt_profiling_info - Profiling information for a blit
+ *
+ * @nsec_active_in_cpu: The number of nanoseconds the job was active in
+ * the CPU. This is an approximate value, check out
+ * the code for more info.
+ * @nsec_active_in_b2r2: The number of nanoseconds the job was active in
+ * B2R2. This is an approximate value, check out
+ * the code for more info.
+ * @total_time_nsec: The total time the job took in nanoseconds,
+ * idle time included.
+ */
+struct b2r2_blt_profiling_info {
+ s32 nsec_active_in_cpu;
+ s32 nsec_active_in_b2r2;
+ s32 total_time_nsec;
+};
+
+/**
+ * struct b2r2_profiler - B2R2 profiler.
+ *
+ * The callbacks are never run concurrently. They must not do any heavy
+ * work, as this might adversely affect the B2R2 driver, and they must not
+ * call the B2R2 profiler API, as this would cause a deadlock. If the
+ * callbacks call into the B2R2 driver, care must be taken as deadlock
+ * situations can arise.
+ *
+ * @blt_done: Called when a blit has finished, timed out or been canceled.
+ */
+struct b2r2_profiler {
+ void (*blt_done)(const struct b2r2_blt_req * const request,
+ const s32 request_id,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+};
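+
+/*
+ * Minimal usage sketch (hypothetical client module):
+ *
+ *	static void my_blt_done(const struct b2r2_blt_req * const request,
+ *			const s32 request_id,
+ *			const struct b2r2_blt_profiling_info * const info)
+ *	{
+ *		pr_info("blt %d: %d ns\n", request_id, info->total_time_nsec);
+ *	}
+ *
+ *	static struct b2r2_profiler my_profiler = {
+ *		.blt_done = my_blt_done,
+ *	};
+ *
+ *	ret = b2r2_register_profiler(&my_profiler);
+ */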
+
+/**
+ * b2r2_register_profiler() - Registers a profiler.
+ *
+ * Currently only one profiler can be registered at any given time.
+ *
+ * @profiler: The profiler
+ *
+ * Returns 0 on success, negative error code on failure
+ */
+int b2r2_register_profiler(const struct b2r2_profiler * const profiler);
+
+/**
+ * b2r2_unregister_profiler() - Unregisters a profiler.
+ *
+ * @profiler: The profiler
+ */
+void b2r2_unregister_profiler(const struct b2r2_profiler * const profiler);
+
+#endif /* _LINUX_VIDEO_B2R2_PROFILER_API_H */
diff --git a/drivers/video/b2r2/b2r2_profiler_socket.c b/drivers/video/b2r2/b2r2_profiler_socket.c
new file mode 100644
index 00000000000..cb95af9380e
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_socket.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiler socket communication
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+#include <asm/errno.h>
+
+#include "b2r2_profiler_api.h"
+#include "b2r2_internal.h"
+#include "b2r2_core.h"
+
+/*
+ * TODO: Call the profiler in a separate thread and have a circular buffer
+ * between the B2R2 driver and that thread. That way the profiler cannot slow
+ * down or kill the B2R2 driver. It seems a bit overkill right now, as there
+ * is only one B2R2 profiler and we have full control over it, but the
+ * situation may be different in the future.
+ */
+
+
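+/* The currently registered profiler, protected by b2r2_profiler_lock */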
+static const struct b2r2_profiler *b2r2_profiler;
+static DEFINE_SEMAPHORE(b2r2_profiler_lock);
+
+
+int b2r2_register_profiler(const struct b2r2_profiler * const profiler)
+{
+ int return_value;
+
+ return_value = down_interruptible(&b2r2_profiler_lock);
+ if (return_value != 0)
+ return return_value;
+
+ if (b2r2_profiler != NULL) {
+ return_value = -EUSERS;
+
+ goto cleanup;
+ }
+
+ b2r2_profiler = profiler;
+
+ return_value = 0;
+
+cleanup:
+ up(&b2r2_profiler_lock);
+
+ return return_value;
+}
+EXPORT_SYMBOL(b2r2_register_profiler);
+
+void b2r2_unregister_profiler(const struct b2r2_profiler * const profiler)
+{
+ down(&b2r2_profiler_lock);
+
+ if (profiler == b2r2_profiler)
+ b2r2_profiler = NULL;
+
+ up(&b2r2_profiler_lock);
+}
+EXPORT_SYMBOL(b2r2_unregister_profiler);
+
+
+bool is_profiler_registered_approx(void)
+{
+ /* No locking by design, to make it fast, hence the approx */
+ return b2r2_profiler != NULL;
+}
+
+void b2r2_call_profiler_blt_done(const struct b2r2_blt_request * const request)
+{
+ int return_value;
+ struct b2r2_blt_profiling_info blt_profiling_info;
+ struct b2r2_core *core = (struct b2r2_core *) request->job.data;
+ struct b2r2_control *cont = core->control;
+
+ return_value = down_interruptible(&b2r2_profiler_lock);
+ if (return_value != 0) {
+ dev_err(cont->dev,
+ "%s: Failed to acquire semaphore, ret=%i. "
+ "Lost profiler call!\n", __func__, return_value);
+
+ return;
+ }
+
+ if (b2r2_profiler == NULL)
+ goto cleanup;
+
+ blt_profiling_info.nsec_active_in_cpu = request->nsec_active_in_cpu;
+ blt_profiling_info.nsec_active_in_b2r2 = request->job.nsec_active_in_hw;
+ blt_profiling_info.total_time_nsec = request->total_time_nsec;
+
+ b2r2_profiler->blt_done(&request->user_req, request->request_id,
+ &blt_profiling_info);
+
+cleanup:
+ up(&b2r2_profiler_lock);
+}
diff --git a/drivers/video/b2r2/b2r2_profiler_socket.h b/drivers/video/b2r2/b2r2_profiler_socket.h
new file mode 100644
index 00000000000..80b2c20293f
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_socket.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiler socket communication
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H
+#define _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H
+
+#include "b2r2_internal.h"
+
+/* Will give a correct result most of the time but can be wrong */
+bool is_profiler_registered_approx(void);
+
+void b2r2_call_profiler_blt_done(const struct b2r2_blt_request * const request);
+
+#endif /* _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H */
diff --git a/drivers/video/b2r2/b2r2_structures.h b/drivers/video/b2r2/b2r2_structures.h
new file mode 100644
index 00000000000..99fa7f047d3
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_structures.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 register struct
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#ifndef __B2R2_STRUCTURES_H
+#define __B2R2_STRUCTURES_H
+
+/* C struct view */
+struct b2r2_memory_map {
+ unsigned char fill0[2304];
+ unsigned int BLT_SSBA17; /* @2304 */
+ unsigned int BLT_SSBA18; /* @2308 */
+ unsigned int BLT_SSBA19; /* @2312 */
+ unsigned int BLT_SSBA20; /* @2316 */
+ unsigned int BLT_SSBA21; /* @2320 */
+ unsigned int BLT_SSBA22; /* @2324 */
+ unsigned int BLT_SSBA23; /* @2328 */
+ unsigned int BLT_SSBA24; /* @2332 */
+ unsigned char fill1[32];
+ unsigned int BLT_STBA5; /* @2368 */
+ unsigned int BLT_STBA6; /* @2372 */
+ unsigned int BLT_STBA7; /* @2376 */
+ unsigned int BLT_STBA8; /* @2380 */
+ unsigned char fill2[176];
+ unsigned int BLT_CTL; /* @2560 */
+ unsigned int BLT_ITS; /* @2564 */
+ unsigned int BLT_STA1; /* @2568 */
+ unsigned char fill3[4];
+ unsigned int BLT_SSBA1; /* @2576 */
+ unsigned int BLT_SSBA2; /* @2580 */
+ unsigned int BLT_SSBA3; /* @2584 */
+ unsigned int BLT_SSBA4; /* @2588 */
+ unsigned int BLT_SSBA5; /* @2592 */
+ unsigned int BLT_SSBA6; /* @2596 */
+ unsigned int BLT_SSBA7; /* @2600 */
+ unsigned int BLT_SSBA8; /* @2604 */
+ unsigned int BLT_STBA1; /* @2608 */
+ unsigned int BLT_STBA2; /* @2612 */
+ unsigned int BLT_STBA3; /* @2616 */
+ unsigned int BLT_STBA4; /* @2620 */
+ unsigned int BLT_CQ1_TRIG_IP; /* @2624 */
+ unsigned int BLT_CQ1_TRIG_CTL; /* @2628 */
+ unsigned int BLT_CQ1_PACE_CTL; /* @2632 */
+ unsigned int BLT_CQ1_IP; /* @2636 */
+ unsigned int BLT_CQ2_TRIG_IP; /* @2640 */
+ unsigned int BLT_CQ2_TRIG_CTL; /* @2644 */
+ unsigned int BLT_CQ2_PACE_CTL; /* @2648 */
+ unsigned int BLT_CQ2_IP; /* @2652 */
+ unsigned int BLT_AQ1_CTL; /* @2656 */
+ unsigned int BLT_AQ1_IP; /* @2660 */
+ unsigned int BLT_AQ1_LNA; /* @2664 */
+ unsigned int BLT_AQ1_STA; /* @2668 */
+ unsigned int BLT_AQ2_CTL; /* @2672 */
+ unsigned int BLT_AQ2_IP; /* @2676 */
+ unsigned int BLT_AQ2_LNA; /* @2680 */
+ unsigned int BLT_AQ2_STA; /* @2684 */
+ unsigned int BLT_AQ3_CTL; /* @2688 */
+ unsigned int BLT_AQ3_IP; /* @2692 */
+ unsigned int BLT_AQ3_LNA; /* @2696 */
+ unsigned int BLT_AQ3_STA; /* @2700 */
+ unsigned int BLT_AQ4_CTL; /* @2704 */
+ unsigned int BLT_AQ4_IP; /* @2708 */
+ unsigned int BLT_AQ4_LNA; /* @2712 */
+ unsigned int BLT_AQ4_STA; /* @2716 */
+ unsigned int BLT_SSBA9; /* @2720 */
+ unsigned int BLT_SSBA10; /* @2724 */
+ unsigned int BLT_SSBA11; /* @2728 */
+ unsigned int BLT_SSBA12; /* @2732 */
+ unsigned int BLT_SSBA13; /* @2736 */
+ unsigned int BLT_SSBA14; /* @2740 */
+ unsigned int BLT_SSBA15; /* @2744 */
+ unsigned int BLT_SSBA16; /* @2748 */
+ unsigned int BLT_SGA1; /* @2752 */
+ unsigned int BLT_SGA2; /* @2756 */
+ unsigned char fill4[8];
+ unsigned int BLT_ITM0; /* @2768 */
+ unsigned int BLT_ITM1; /* @2772 */
+ unsigned int BLT_ITM2; /* @2776 */
+ unsigned int BLT_ITM3; /* @2780 */
+ unsigned char fill5[16];
+ unsigned int BLT_DFV2; /* @2800 */
+ unsigned int BLT_DFV1; /* @2804 */
+ unsigned int BLT_PRI; /* @2808 */
+ unsigned char fill6[8];
+ unsigned int PLUGS1_OP2; /* @2820 */
+ unsigned int PLUGS1_CHZ; /* @2824 */
+ unsigned int PLUGS1_MSZ; /* @2828 */
+ unsigned int PLUGS1_PGZ; /* @2832 */
+ unsigned char fill7[16];
+ unsigned int PLUGS2_OP2; /* @2852 */
+ unsigned int PLUGS2_CHZ; /* @2856 */
+ unsigned int PLUGS2_MSZ; /* @2860 */
+ unsigned int PLUGS2_PGZ; /* @2864 */
+ unsigned char fill8[16];
+ unsigned int PLUGS3_OP2; /* @2884 */
+ unsigned int PLUGS3_CHZ; /* @2888 */
+ unsigned int PLUGS3_MSZ; /* @2892 */
+ unsigned int PLUGS3_PGZ; /* @2896 */
+ unsigned char fill9[48];
+ unsigned int PLUGT_OP2; /* @2948 */
+ unsigned int PLUGT_CHZ; /* @2952 */
+ unsigned int PLUGT_MSZ; /* @2956 */
+ unsigned int PLUGT_PGZ; /* @2960 */
+ unsigned char fill10[108];
+ unsigned int BLT_NIP; /* @3072 */
+ unsigned int BLT_CIC; /* @3076 */
+ unsigned int BLT_INS; /* @3080 */
+ unsigned int BLT_ACK; /* @3084 */
+ unsigned int BLT_TBA; /* @3088 */
+ unsigned int BLT_TTY; /* @3092 */
+ unsigned int BLT_TXY; /* @3096 */
+ unsigned int BLT_TSZ; /* @3100 */
+ unsigned int BLT_S1CF; /* @3104 */
+ unsigned int BLT_S2CF; /* @3108 */
+ unsigned int BLT_S1BA; /* @3112 */
+ unsigned int BLT_S1TY; /* @3116 */
+ unsigned int BLT_S1XY; /* @3120 */
+ unsigned char fill11[4];
+ unsigned int BLT_S2BA; /* @3128 */
+ unsigned int BLT_S2TY; /* @3132 */
+ unsigned int BLT_S2XY; /* @3136 */
+ unsigned int BLT_S2SZ; /* @3140 */
+ unsigned int BLT_S3BA; /* @3144 */
+ unsigned int BLT_S3TY; /* @3148 */
+ unsigned int BLT_S3XY; /* @3152 */
+ unsigned int BLT_S3SZ; /* @3156 */
+ unsigned int BLT_CWO; /* @3160 */
+ unsigned int BLT_CWS; /* @3164 */
+ unsigned int BLT_CCO; /* @3168 */
+ unsigned int BLT_CML; /* @3172 */
+ unsigned int BLT_FCTL; /* @3176 */
+ unsigned int BLT_PMK; /* @3180 */
+ unsigned int BLT_RSF; /* @3184 */
+ unsigned int BLT_RZI; /* @3188 */
+ unsigned int BLT_HFP; /* @3192 */
+ unsigned int BLT_VFP; /* @3196 */
+ unsigned int BLT_Y_RSF; /* @3200 */
+ unsigned int BLT_Y_RZI; /* @3204 */
+ unsigned int BLT_Y_HFP; /* @3208 */
+ unsigned int BLT_Y_VFP; /* @3212 */
+ unsigned char fill12[16];
+ unsigned int BLT_KEY1; /* @3232 */
+ unsigned int BLT_KEY2; /* @3236 */
+ unsigned char fill13[8];
+ unsigned int BLT_SAR; /* @3248 */
+ unsigned int BLT_USR; /* @3252 */
+ unsigned char fill14[8];
+ unsigned int BLT_IVMX0; /* @3264 */
+ unsigned int BLT_IVMX1; /* @3268 */
+ unsigned int BLT_IVMX2; /* @3272 */
+ unsigned int BLT_IVMX3; /* @3276 */
+ unsigned int BLT_OVMX0; /* @3280 */
+ unsigned int BLT_OVMX1; /* @3284 */
+ unsigned int BLT_OVMX2; /* @3288 */
+ unsigned int BLT_OVMX3; /* @3292 */
+ unsigned char fill15[8];
+ unsigned int BLT_VC1R; /* @3304 */
+ unsigned char fill16[20];
+ unsigned int BLT_Y_HFC0; /* @3328 */
+ unsigned int BLT_Y_HFC1; /* @3332 */
+ unsigned int BLT_Y_HFC2; /* @3336 */
+ unsigned int BLT_Y_HFC3; /* @3340 */
+ unsigned int BLT_Y_HFC4; /* @3344 */
+ unsigned int BLT_Y_HFC5; /* @3348 */
+ unsigned int BLT_Y_HFC6; /* @3352 */
+ unsigned int BLT_Y_HFC7; /* @3356 */
+ unsigned int BLT_Y_HFC8; /* @3360 */
+ unsigned int BLT_Y_HFC9; /* @3364 */
+ unsigned int BLT_Y_HFC10; /* @3368 */
+ unsigned int BLT_Y_HFC11; /* @3372 */
+ unsigned int BLT_Y_HFC12; /* @3376 */
+ unsigned int BLT_Y_HFC13; /* @3380 */
+ unsigned int BLT_Y_HFC14; /* @3384 */
+ unsigned int BLT_Y_HFC15; /* @3388 */
+ unsigned char fill17[80];
+ unsigned int BLT_Y_VFC0; /* @3472 */
+ unsigned int BLT_Y_VFC1; /* @3476 */
+ unsigned int BLT_Y_VFC2; /* @3480 */
+ unsigned int BLT_Y_VFC3; /* @3484 */
+ unsigned int BLT_Y_VFC4; /* @3488 */
+ unsigned int BLT_Y_VFC5; /* @3492 */
+ unsigned int BLT_Y_VFC6; /* @3496 */
+ unsigned int BLT_Y_VFC7; /* @3500 */
+ unsigned int BLT_Y_VFC8; /* @3504 */
+ unsigned int BLT_Y_VFC9; /* @3508 */
+ unsigned char fill18[72];
+ unsigned int BLT_HFC0; /* @3584 */
+ unsigned int BLT_HFC1; /* @3588 */
+ unsigned int BLT_HFC2; /* @3592 */
+ unsigned int BLT_HFC3; /* @3596 */
+ unsigned int BLT_HFC4; /* @3600 */
+ unsigned int BLT_HFC5; /* @3604 */
+ unsigned int BLT_HFC6; /* @3608 */
+ unsigned int BLT_HFC7; /* @3612 */
+ unsigned int BLT_HFC8; /* @3616 */
+ unsigned int BLT_HFC9; /* @3620 */
+ unsigned int BLT_HFC10; /* @3624 */
+ unsigned int BLT_HFC11; /* @3628 */
+ unsigned int BLT_HFC12; /* @3632 */
+ unsigned int BLT_HFC13; /* @3636 */
+ unsigned int BLT_HFC14; /* @3640 */
+ unsigned int BLT_HFC15; /* @3644 */
+ unsigned char fill19[80];
+ unsigned int BLT_VFC0; /* @3728 */
+ unsigned int BLT_VFC1; /* @3732 */
+ unsigned int BLT_VFC2; /* @3736 */
+ unsigned int BLT_VFC3; /* @3740 */
+ unsigned int BLT_VFC4; /* @3744 */
+ unsigned int BLT_VFC5; /* @3748 */
+ unsigned int BLT_VFC6; /* @3752 */
+ unsigned int BLT_VFC7; /* @3756 */
+ unsigned int BLT_VFC8; /* @3760 */
+ unsigned int BLT_VFC9; /* @3764 */
+};
+
+#endif /* !defined(__B2R2_STRUCTURES_H) */
+
diff --git a/drivers/video/b2r2/b2r2_timing.c b/drivers/video/b2r2/b2r2_timing.c
new file mode 100644
index 00000000000..4f3e2b8b042
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_timing.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 timing
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/time.h>
+
+
+u32 b2r2_get_curr_nsec(void)
+{
+ struct timespec ts;
+
+ getrawmonotonic(&ts);
+
+ return (u32)timespec_to_ns(&ts);
+}
diff --git a/drivers/video/b2r2/b2r2_timing.h b/drivers/video/b2r2/b2r2_timing.h
new file mode 100644
index 00000000000..e87113c0ec9
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_timing.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 timing
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_
+
+/**
+ * b2r2_get_curr_nsec() - Return the current time in nanoseconds. Note that
+ * the value wraps when the u32 limit is reached
+ * (roughly every 4.3 seconds).
+ *
+ */
+u32 b2r2_get_curr_nsec(void);
+
+#endif /* _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_ */
diff --git a/drivers/video/b2r2/b2r2_utils.c b/drivers/video/b2r2/b2r2_utils.c
new file mode 100644
index 00000000000..44c738b5aab
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_utils.c
@@ -0,0 +1,1324 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 utils
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_utils.h"
+#include "b2r2_debug.h"
+#include "b2r2_internal.h"
+
+const s32 b2r2_s32_max = 2147483647;
+
+
+/**
+ * calculate_scale_factor() - calculates the scale factor between the given
+ * values
+ */
+int calculate_scale_factor(struct device *dev,
+ u32 from, u32 to, u16 *sf_out)
+{
+ int ret;
+ u32 sf;
+
+ b2r2_log_info(dev, "%s\n", __func__);
+
+ if (to == from) {
+ *sf_out = 1 << 10;
+ return 0;
+ } else if (to == 0) {
+ b2r2_log_err(dev, "%s: To is 0!\n", __func__);
+ BUG_ON(1);
+ }
+
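+ /* 6.10 fixed point; e.g. from == 3 and to == 2 gives sf == 0x600 (1.5) */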
+ sf = (from << 10) / to;
+
+ if ((sf & 0xffff0000) != 0) {
+ /* Overflow error */
+ b2r2_log_warn(dev, "%s: "
+ "Scale factor too large\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ } else if (sf == 0) {
+ b2r2_log_warn(dev, "%s: "
+ "Scale factor too small\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ *sf_out = (u16)sf;
+
+ b2r2_log_info(dev, "%s exit\n", __func__);
+
+ return 0;
+
+error:
+ b2r2_log_warn(dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+void b2r2_get_img_bounding_rect(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *bounding_rect)
+{
+ bounding_rect->x = 0;
+ bounding_rect->y = 0;
+ bounding_rect->width = img->width;
+ bounding_rect->height = img->height;
+}
+
+
+bool b2r2_is_zero_area_rect(struct b2r2_blt_rect *rect)
+{
+ return rect->width == 0 || rect->height == 0;
+}
+
+bool b2r2_is_rect_inside_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2)
+{
+ return rect1->x >= rect2->x &&
+ rect1->y >= rect2->y &&
+ rect1->x + rect1->width <= rect2->x + rect2->width &&
+ rect1->y + rect1->height <= rect2->y + rect2->height;
+}
+
+bool b2r2_is_rect_gte_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2)
+{
+ return rect1->width >= rect2->width &&
+ rect1->height >= rect2->height;
+}
+
+void b2r2_intersect_rects(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2, struct b2r2_blt_rect *intersection)
+{
+ struct b2r2_blt_rect tmp_rect;
+
+ tmp_rect.x = max(rect1->x, rect2->x);
+ tmp_rect.y = max(rect1->y, rect2->y);
+ tmp_rect.width = min(rect1->x + rect1->width, rect2->x + rect2->width)
+ - tmp_rect.x;
+ if (tmp_rect.width < 0)
+ tmp_rect.width = 0;
+ tmp_rect.height =
+ min(rect1->y + rect1->height, rect2->y + rect2->height) -
+ tmp_rect.y;
+ if (tmp_rect.height < 0)
+ tmp_rect.height = 0;
+
+ *intersection = tmp_rect;
+}
+
+/*
+ * Calculate new rectangles for the supplied request so that clipping to
+ * the destination image can be avoided. Essentially, the new destination
+ * rectangle is defined inside the old one. Given the transform and
+ * scaling, one has to calculate which part of the old source rectangle
+ * corresponds to the new part of the old destination rectangle.
+ */
+void b2r2_trim_rects(struct device *dev,
+ const struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *new_bg_rect,
+ struct b2r2_blt_rect *new_dst_rect,
+ struct b2r2_blt_rect *new_src_rect)
+{
+ enum b2r2_blt_transform transform = req->transform;
+ struct b2r2_blt_rect *old_src_rect =
+ (struct b2r2_blt_rect *) &req->src_rect;
+ struct b2r2_blt_rect *old_dst_rect =
+ (struct b2r2_blt_rect *) &req->dst_rect;
+ struct b2r2_blt_rect *old_bg_rect =
+ (struct b2r2_blt_rect *) &req->bg_rect;
+ struct b2r2_blt_rect dst_img_bounds;
+ s32 src_x = 0;
+ s32 src_y = 0;
+ s32 src_w = 0;
+ s32 src_h = 0;
+ s32 dx = 0;
+ s32 dy = 0;
+ u16 hsf;
+ u16 vsf;
+
+ b2r2_log_info(dev,
+ "%s\nold_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ old_dst_rect->x, old_dst_rect->y,
+ old_dst_rect->width, old_dst_rect->height);
+ b2r2_log_info(dev,
+ "%s\nold_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ old_src_rect->x, old_src_rect->y,
+ old_src_rect->width, old_src_rect->height);
+
+ b2r2_get_img_bounding_rect((struct b2r2_blt_img *) &req->dst_img,
+ &dst_img_bounds);
+
+ /* dst_rect inside dst_img, no clipping necessary */
+ if (b2r2_is_rect_inside_rect(old_dst_rect, &dst_img_bounds))
+ goto keep_rects;
+
+ b2r2_intersect_rects(old_dst_rect, &dst_img_bounds, new_dst_rect);
+ b2r2_log_info(dev,
+ "%s\nnew_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ new_dst_rect->x, new_dst_rect->y,
+ new_dst_rect->width, new_dst_rect->height);
+
+ /* dst_rect completely outside, leave it to validation */
+ if (new_dst_rect->width == 0 || new_dst_rect->height == 0)
+ goto keep_rects;
+
+ dx = new_dst_rect->x - old_dst_rect->x;
+ dy = new_dst_rect->y - old_dst_rect->y;
+
+ if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ int res = 0;
+ res = calculate_scale_factor(dev, old_src_rect->width,
+ old_dst_rect->height, &hsf);
+ /* invalid dimensions, leave them to validation */
+ if (res < 0)
+ goto keep_rects;
+
+ res = calculate_scale_factor(dev, old_src_rect->height,
+ old_dst_rect->width, &vsf);
+ if (res < 0)
+ goto keep_rects;
+
+ /*
+ * After applying the inverse transform
+ * for 90 degree rotation, the top-left corner
+ * becomes top-right.
+ * src_rect origin is defined as top-left,
+ * so a translation between dst and src
+ * coordinate spaces is necessary.
+ */
+ src_x = (old_src_rect->width << 10) -
+ hsf * (dy + new_dst_rect->height);
+ src_y = dx * vsf;
+ src_w = new_dst_rect->height * hsf;
+ src_h = new_dst_rect->width * vsf;
+ } else {
+ int res = 0;
+ res = calculate_scale_factor(dev, old_src_rect->width,
+ old_dst_rect->width, &hsf);
+ if (res < 0)
+ goto keep_rects;
+
+ res = calculate_scale_factor(dev, old_src_rect->height,
+ old_dst_rect->height, &vsf);
+ if (res < 0)
+ goto keep_rects;
+
+ src_x = dx * hsf;
+ src_y = dy * vsf;
+ src_w = new_dst_rect->width * hsf;
+ src_h = new_dst_rect->height * vsf;
+ }
+
+ /*
+ * src_w must contain all the pixels that contribute
+ * to a particular destination rectangle.
+ * ((x + 0x3ff) >> 10) is equivalent to ceiling(x),
+ * expressed in 6.10 fixed point format.
+ * Every destination rectangle, maps to a certain area in the source
+ * rectangle. The area in source will most likely not be a rectangle
+ * with exact integer dimensions whenever arbitrary scaling is involved.
+ * Consider the following example.
+ * Suppose, that width of the current destination rectangle maps
+ * to 1.7 pixels in source, starting at x == 5.4, as calculated
+ * using the scaling factor.
+ * This means that while the destination rectangle is written,
+ * the source should be read from x == 5.4 up to x == 5.4 + 1.7 == 7.1
+ * Consequently, color from 3 pixels (x == 5, 6 and 7)
+ * needs to be read from source.
+ * The formula below the comment yields:
+ * ceil(0.4 + 1.7) == ceil(2.1) == 3
+ * (src_x & 0x3ff) is the fractional part of src_x,
+ * which is expressed in 6.10 fixed point format.
+ * Thus, width of the source area should be 3 pixels wide,
+ * starting at x == 5.
+ */
+ src_w = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;
+ src_h = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10;
+
+ src_x >>= 10;
+ src_y >>= 10;
+
+ if (transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ src_x = old_src_rect->width - src_x - src_w;
+
+ if (transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ src_y = old_src_rect->height - src_y - src_h;
+
+ /*
+ * Translate the src_rect coordinates into true
+ * src_buffer coordinates.
+ */
+ src_x += old_src_rect->x;
+ src_y += old_src_rect->y;
+
+ new_src_rect->x = src_x;
+ new_src_rect->y = src_y;
+ new_src_rect->width = src_w;
+ new_src_rect->height = src_h;
+
+ b2r2_log_info(dev,
+ "%s\nnew_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ new_src_rect->x, new_src_rect->y,
+ new_src_rect->width, new_src_rect->height);
+
+ if (req->flags & B2R2_BLT_FLAG_BG_BLEND) {
+ /* Modify bg_rect in the same way as dst_rect */
+ s32 dw = new_dst_rect->width - old_dst_rect->width;
+ s32 dh = new_dst_rect->height - old_dst_rect->height;
+ b2r2_log_info(dev,
+ "%s\nold bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n",
+ __func__, old_bg_rect->x, old_bg_rect->y,
+ old_bg_rect->width, old_bg_rect->height);
+ new_bg_rect->x = old_bg_rect->x + dx;
+ new_bg_rect->y = old_bg_rect->y + dy;
+ new_bg_rect->width = old_bg_rect->width + dw;
+ new_bg_rect->height = old_bg_rect->height + dh;
+ b2r2_log_info(dev,
+ "%s\nnew bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n",
+ __func__, new_bg_rect->x, new_bg_rect->y,
+ new_bg_rect->width, new_bg_rect->height);
+ }
+ return;
+keep_rects:
+ /*
+ * Recalculation was not possible, or not necessary.
+ * Do not change anything, leave it to validation.
+ */
+ *new_src_rect = *old_src_rect;
+ *new_dst_rect = *old_dst_rect;
+ *new_bg_rect = *old_bg_rect;
+ b2r2_log_info(dev, "%s original rectangles preserved.\n", __func__);
+ return;
+}
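+
+/*
+ * Worked example of the 6.10 fixed-point ceiling used above (an
+ * illustrative sketch, excluded from the build):
+ */
+#if 0
+	/* src_x = 5.4 and src_w = 1.7, both in 6.10 fixed point */
+	s32 src_x = (5 << 10) + 410;	/* ~5.4 * 1024 */
+	s32 src_w = (1 << 10) + 717;	/* ~1.7 * 1024 */
+	/* ceil(frac(5.4) + 1.7) = ceil(2.1) = 3 pixels */
+	s32 w_px = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;	/* == 3 */
+#endif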
+
+int b2r2_get_fmt_bpp(struct device *dev, enum b2r2_blt_fmt fmt)
+{
+ /*
+	 * Currently this function is not used that often, but if that
+	 * changes, a lookup table could make it a lot faster.
+ */
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return 1;
+
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return 8;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return 12;
+
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return 16;
+
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return 24;
+
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return 32;
+
+ default:
+ b2r2_log_err(dev,
+ "%s: Internal error! Format %#x not recognized.\n",
+ __func__, fmt);
+ return 32;
+ }
+}
+
+int b2r2_get_fmt_y_bpp(struct device *dev, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return 8;
+
+ default:
+ b2r2_log_err(dev,
+			"%s: Internal error! Non-YCbCr format supplied.\n",
+ __func__);
+ return 8;
+ }
+}
+
+bool b2r2_is_single_plane_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_independent_pixel_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcri_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcrsp_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcrp_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr420_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr444_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+u32 b2r2_calc_pitch_from_width(struct device *dev,
+ s32 width, enum b2r2_blt_fmt fmt)
+{
+ if (b2r2_is_single_plane_fmt(fmt)) {
+ return (u32)b2r2_div_round_up(width *
+ b2r2_get_fmt_bpp(dev, fmt), 8);
+ } else if (b2r2_is_ycbcrsp_fmt(fmt) || b2r2_is_ycbcrp_fmt(fmt)) {
+ return (u32)b2r2_div_round_up(width *
+ b2r2_get_fmt_y_bpp(dev, fmt), 8);
+ } else {
+ b2r2_log_err(dev, "%s: Internal error! "
+ "Pitchless format supplied.\n",
+ __func__);
+ return 0;
+ }
+}
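+
+/*
+ * Example (illustrative): 100 pixels of B2R2_BLT_FMT_32_BIT_ARGB8888 give
+ * a pitch of ceil(100 * 32 / 8) = 400 bytes, while for (semi-)planar
+ * YCbCr the pitch covers the Y plane only, so 100 pixels of
+ * B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR give ceil(100 * 8 / 8) = 100
+ * bytes.
+ */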
+
+u32 b2r2_get_img_pitch(struct device *dev, struct b2r2_blt_img *img)
+{
+ if (img->pitch != 0)
+ return img->pitch;
+ else
+ return b2r2_calc_pitch_from_width(dev, img->width, img->fmt);
+}
+
+s32 b2r2_get_img_size(struct device *dev, struct b2r2_blt_img *img)
+{
+ if (b2r2_is_single_plane_fmt(img->fmt)) {
+ return (s32)b2r2_get_img_pitch(dev, img) * img->height;
+ } else if (b2r2_is_ycbcrsp_fmt(img->fmt) ||
+ b2r2_is_ycbcrp_fmt(img->fmt)) {
+ s32 y_plane_size;
+
+ y_plane_size = (s32)b2r2_get_img_pitch(dev, img) * img->height;
+
+ if (b2r2_is_ycbcr420_fmt(img->fmt)) {
+ return y_plane_size + y_plane_size / 2;
+ } else if (b2r2_is_ycbcr422_fmt(img->fmt)) {
+ return y_plane_size * 2;
+ } else if (b2r2_is_ycbcr444_fmt(img->fmt)) {
+ return y_plane_size * 3;
+ } else {
+ b2r2_log_err(dev, "%s: Internal error!"
+ " Format %#x not recognized.\n",
+ __func__, img->fmt);
+ return 0;
+ }
+ } else if (b2r2_is_mb_fmt(img->fmt)) {
+ return (img->width * img->height *
+ b2r2_get_fmt_bpp(dev, img->fmt)) / 8;
+ } else {
+ b2r2_log_err(dev, "%s: Internal error! "
+ "Format %#x not recognized.\n",
+ __func__, img->fmt);
+ return 0;
+ }
+}
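+
+/*
+ * Example (illustrative): a 100x100 B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR
+ * image has a 100 * 100 = 10000 byte Y plane plus half of that for the
+ * subsampled chroma, so b2r2_get_img_size() returns 15000 bytes.
+ */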
+
+s32 b2r2_div_round_up(s32 dividend, s32 divisor)
+{
+ s32 quotient = dividend / divisor;
+ if (dividend % divisor != 0)
+ quotient++;
+
+ return quotient;
+}
+
+bool b2r2_is_aligned(s32 value, s32 alignment)
+{
+ return value % alignment == 0;
+}
+
+s32 b2r2_align_up(s32 value, s32 alignment)
+{
+ s32 remainder = abs(value) % abs(alignment);
+ s32 value_to_add;
+
+ if (remainder > 0) {
+ if (value >= 0)
+ value_to_add = alignment - remainder;
+ else
+ value_to_add = remainder;
+ } else {
+ value_to_add = 0;
+ }
+
+ return value + value_to_add;
+}
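+
+/*
+ * Illustration (not built): b2r2_div_round_up(7, 2) == 4 and
+ * b2r2_align_up(-5, 4) == -4, i.e. alignment always moves the value
+ * towards positive infinity, also for negative inputs.
+ */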
+
+/**
+ * b2r2_get_alpha_range() - returns the alpha range of the given format
+ */
+enum b2r2_ty b2r2_get_alpha_range(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ return B2R2_TY_ALPHA_RANGE_255; /* 0 - 255 */
+ default:
+ return B2R2_TY_ALPHA_RANGE_128; /* 0 - 128 */
+ }
+}
+
+/**
+ * b2r2_get_alpha() - returns the pixel alpha in 0...255 range
+ */
+u8 b2r2_get_alpha(enum b2r2_blt_fmt fmt, u32 pixel)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ return (pixel >> 24) & 0xff;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return pixel & 0xff;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+		return (pixel >> 16) & 0xff;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return (((pixel >> 12) & 0xf) * 255) / 15;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return (pixel >> 15) * 255;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return pixel * 255;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return pixel;
+ default:
+ return 255;
+ }
+}
+
+/**
+ * b2r2_set_alpha() - returns a color value with the alpha component set
+ */
+u32 b2r2_set_alpha(enum b2r2_blt_fmt fmt, u8 alpha, u32 color)
+{
+ u32 alpha_mask;
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ color &= 0x00ffffff;
+ alpha_mask = alpha << 24;
+ break;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ color &= 0xffffff00;
+ alpha_mask = alpha;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ color &= 0x00ffff;
+ alpha_mask = alpha << 16;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ color &= 0x0fff;
+ alpha_mask = (alpha << 8) & 0xF000;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ color &= 0x7fff;
+		alpha_mask = (alpha / 255) << 15;
+ break;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ color = 0;
+ alpha_mask = (alpha / 255);
+ break;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ color = 0;
+ alpha_mask = alpha;
+ break;
+ default:
+ alpha_mask = 0;
+ }
+
+ return color | alpha_mask;
+}
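+
+/*
+ * Round-trip example (an illustrative sketch, excluded from the build):
+ * for ARGB4444, b2r2_get_alpha() expands the 4-bit alpha 0xF to 255 and
+ * b2r2_set_alpha() maps 255 back to a 0xF top nibble.
+ */
+#if 0
+	u32 px = 0xf123;	/* ARGB4444, alpha nibble = 0xF */
+	u8 a = b2r2_get_alpha(B2R2_BLT_FMT_16_BIT_ARGB4444, px);	/* 255 */
+	px = b2r2_set_alpha(B2R2_BLT_FMT_16_BIT_ARGB4444, a, px);	/* 0xf123 */
+#endif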
+
+/**
+ * b2r2_fmt_has_alpha() - returns whether the given format carries an alpha value
+ */
+bool b2r2_fmt_has_alpha(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * b2r2_is_rgb_fmt() - returns whether the given format is a rgb format
+ */
+bool b2r2_is_rgb_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * b2r2_is_bgr_fmt() - returns whether the given format is a bgr format
+ */
+bool b2r2_is_bgr_fmt(enum b2r2_blt_fmt fmt)
+{
+ return (fmt == B2R2_BLT_FMT_32_BIT_ABGR8888);
+}
+
+/**
+ * b2r2_is_yuv_fmt() - returns whether the given format is a yuv format
+ */
+bool b2r2_is_yuv_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * b2r2_is_yvu_fmt() - returns whether the given format is a yvu format
+ */
+bool b2r2_is_yvu_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * b2r2_is_yuv420_fmt() - returns whether the given format is a yuv420 format
+ */
+bool b2r2_is_yuv420_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_yuv422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * b2r2_is_yvu420_fmt() - returns whether the given format is a yvu420 format
+ */
+bool b2r2_is_yvu420_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_yvu422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * b2r2_is_yuv444_fmt() - returns whether the given format is a yuv444 format
+ */
+bool b2r2_is_yuv444_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * b2r2_fmt_byte_pitch() - returns the pitch of a pixmap with the given width
+ */
+int b2r2_fmt_byte_pitch(enum b2r2_blt_fmt fmt, u32 width)
+{
+ int pitch;
+
+ switch (fmt) {
+	case B2R2_BLT_FMT_1_BIT_A1:
+		pitch = width >> 3; /* Divide by 8; shift is faster */
+		if ((width & 0x7) != 0) /* Check for a remainder of 8 */
+			pitch++;
+		return pitch;
+
+ case B2R2_BLT_FMT_8_BIT_A8: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return width;
+
+ case B2R2_BLT_FMT_16_BIT_ARGB4444: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* Fall through */
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return width << 1;
+
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_YUV888: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ return width * 3;
+
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return width << 2;
+
+ default:
+ /* Should never, ever happen */
+ BUG_ON(1);
+ return 0;
+ }
+}
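+
+/*
+ * Example (illustrative): for B2R2_BLT_FMT_1_BIT_A1 a width of 9 pixels
+ * needs ceil(9 / 8) = 2 bytes per line; the remainder check above gives
+ * (9 >> 3) == 1 and (9 & 0x7) != 0, so pitch == 2.
+ */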
+
+/**
+ * b2r2_to_native_fmt() - returns the native B2R2 format
+ */
+enum b2r2_native_fmt b2r2_to_native_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_UNUSED:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return B2R2_NATIVE_A1;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_NATIVE_A8;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return B2R2_NATIVE_ARGB4444;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return B2R2_NATIVE_ARGB1555;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ return B2R2_NATIVE_ARGB8565;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ return B2R2_NATIVE_RGB888;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888: /* Not actually supported by HW */
+ return B2R2_NATIVE_YCBCR888;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Not actually supported by HW */
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ return B2R2_NATIVE_ARGB8888;
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888: /* Not actually supported by HW */
+ return B2R2_NATIVE_AYCBCR8888;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return B2R2_NATIVE_YCBCR42X_R2B;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_NATIVE_YCBCR42X_MBN;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_NATIVE_YUV;
+ default:
+ /* Should never ever happen */
+ return B2R2_NATIVE_BYTE;
+ }
+}
+
+/**
+ * b2r2_to_RGB888() - bit-expands the color from fmt to RGB888, blue at LSB
+ *
+ * MSBs are copied into the missing LSBs.
+ */
+u32 b2r2_to_RGB888(u32 color, const enum b2r2_blt_fmt fmt)
+{
+ u32 out_color = 0;
+ u32 r = 0;
+ u32 g = 0;
+ u32 b = 0;
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ r = ((color & 0xf00) << 12) | ((color & 0xf00) << 8);
+ g = ((color & 0xf0) << 8) | ((color & 0xf0) << 4);
+ b = ((color & 0xf) << 4) | (color & 0xf);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ r = ((color & 0x7c00) << 9) | ((color & 0x7000) << 4);
+ g = ((color & 0x3e0) << 6) | ((color & 0x380) << 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ out_color = color & 0xffffff;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ r = (color & 0xff) << 16;
+ g = color & 0xff00;
+ b = (color & 0xff0000) >> 16;
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ default:
+ break;
+ }
+
+ return out_color;
+}
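+
+/*
+ * Expansion example (illustrative): pure red in RGB565, 0xf800, becomes
+ * 0xff0000. The five R bits move to bits 23:19 and the three R MSBs are
+ * replicated into bits 18:16, so 11111b expands to 11111111b.
+ */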
+
+/**
+ * b2r2_get_fmt_type() - returns the type of the given format (raster, planar, etc.)
+ */
+enum b2r2_fmt_type b2r2_get_fmt_type(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_FMT_TYPE_RASTER;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_FMT_TYPE_PLANAR;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_FMT_TYPE_SEMI_PLANAR;
+ default:
+ return B2R2_FMT_TYPE_RASTER;
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * sprintf_req() - Builds a string representing the request, for debug
+ *
+ * @request: Request that should be encoded into a string
+ * @buf: Receiving buffer
+ * @size: Size of receiving buffer
+ *
+ * Returns number of characters in string, excluding null terminator
+ */
+int sprintf_req(struct b2r2_blt_request *request, char *buf, int size)
+{
+ size_t dev_size = 0;
+
+ /* generic request info */
+ dev_size += sprintf(buf + dev_size,
+ "instance : 0x%08lX\n",
+ (unsigned long) request->instance);
+ dev_size += sprintf(buf + dev_size,
+ "size : %d bytes\n", request->user_req.size);
+ dev_size += sprintf(buf + dev_size,
+ "flags : 0x%08lX\n",
+ (unsigned long) request->user_req.flags);
+ dev_size += sprintf(buf + dev_size,
+ "transform : %d\n",
+ (int) request->user_req.transform);
+ dev_size += sprintf(buf + dev_size,
+			"prio : %d\n", request->user_req.prio);
+ dev_size += sprintf(buf + dev_size,
+ "global_alpha : %d\n",
+ (int) request->user_req.global_alpha);
+ dev_size += sprintf(buf + dev_size,
+ "report1 : 0x%08lX\n",
+ (unsigned long) request->user_req.report1);
+ dev_size += sprintf(buf + dev_size,
+ "report2 : 0x%08lX\n",
+ (unsigned long) request->user_req.report2);
+ dev_size += sprintf(buf + dev_size,
+ "request_id : 0x%08lX\n\n",
+ (unsigned long) request->request_id);
+
+ /* src info */
+ dev_size += sprintf(buf + dev_size,
+ "src_img.fmt : %#010x\n",
+ request->user_req.src_img.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "src_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d, "
+ "offset=%d, len=%d}\n",
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.hwmem_buf_name,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "src_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_img.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask.fmt : %#010x\n",
+ request->user_req.src_mask.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.src_mask.buf.type,
+ request->user_req.src_mask.buf.hwmem_buf_name,
+ request->user_req.src_mask.buf.fd,
+ request->user_req.src_mask.buf.offset,
+ request->user_req.src_mask.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.src_mask.width,
+ request->user_req.src_mask.height,
+ request->user_req.src_mask.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "src_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "src_color : 0x%08lX\n\n",
+ (unsigned long) request->user_req.src_color);
+
+ /* bg info */
+ dev_size += sprintf(buf + dev_size,
+ "bg_img.fmt : %#010x\n",
+ request->user_req.bg_img.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "bg_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.bg_img.buf.type,
+ request->user_req.bg_img.buf.hwmem_buf_name,
+ request->user_req.bg_img.buf.fd,
+ request->user_req.bg_img.buf.offset,
+ request->user_req.bg_img.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "bg_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.bg_img.width,
+ request->user_req.bg_img.height,
+ request->user_req.bg_img.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "bg_rect : {x=%d, y=%d, width=%d, height=%d}\n\n",
+ request->user_req.bg_rect.x,
+ request->user_req.bg_rect.y,
+ request->user_req.bg_rect.width,
+ request->user_req.bg_rect.height);
+
+ /* dst info */
+ dev_size += sprintf(buf + dev_size,
+ "dst_img.fmt : %#010x\n",
+ request->user_req.dst_img.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "dst_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.hwmem_buf_name,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "dst_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_img.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "dst_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "dst_clip_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.dst_clip_rect.x,
+ request->user_req.dst_clip_rect.y,
+ request->user_req.dst_clip_rect.width,
+ request->user_req.dst_clip_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "dst_color : 0x%08lX\n\n",
+ (unsigned long) request->user_req.dst_color);
+
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->src_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->src_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->src_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->src_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->src_resolved.file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.file_len : %d\n\n",
+ request->src_resolved.file_len);
+
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.file_len : %d\n\n",
+ request->src_mask_resolved.file_len);
+
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.file_len : %d\n\n",
+ request->dst_resolved.file_len);
+
+ return dev_size;
+}
+#endif
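+
+/*
+ * Illustrative caller (a hedged sketch, excluded from the build). Note
+ * that sprintf_req() never references its @size argument, so the buffer
+ * must be large enough for the whole dump; one page is assumed to
+ * suffice here.
+ */
+#if 0
+	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buf) {
+		int len = sprintf_req(request, buf, PAGE_SIZE);
+		pr_debug("%.*s\n", len, buf);
+		kfree(buf);
+	}
+#endif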
+
+void b2r2_recalculate_rects(struct device *dev,
+ struct b2r2_blt_req *req)
+{
+ struct b2r2_blt_rect new_dst_rect;
+ struct b2r2_blt_rect new_src_rect;
+ struct b2r2_blt_rect new_bg_rect;
+
+ b2r2_trim_rects(dev,
+ req, &new_bg_rect, &new_dst_rect, &new_src_rect);
+
+ req->dst_rect = new_dst_rect;
+ req->src_rect = new_src_rect;
+ if (req->flags & B2R2_BLT_FLAG_BG_BLEND)
+ req->bg_rect = new_bg_rect;
+}
diff --git a/drivers/video/b2r2/b2r2_utils.h b/drivers/video/b2r2/b2r2_utils.h
new file mode 100644
index 00000000000..081ac1f4848
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_utils.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 utils
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_UTILS_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_UTILS_H_
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_internal.h"
+
+extern const s32 b2r2_s32_max;
+
+int calculate_scale_factor(struct device *dev,
+ u32 from, u32 to, u16 *sf_out);
+void b2r2_get_img_bounding_rect(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *bounding_rect);
+
+bool b2r2_is_zero_area_rect(struct b2r2_blt_rect *rect);
+bool b2r2_is_rect_inside_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2);
+bool b2r2_is_rect_gte_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2);
+void b2r2_intersect_rects(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2,
+ struct b2r2_blt_rect *intersection);
+void b2r2_trim_rects(struct device *dev,
+ const struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *new_bg_rect,
+ struct b2r2_blt_rect *new_dst_rect,
+ struct b2r2_blt_rect *new_src_rect);
+
+int b2r2_get_fmt_bpp(struct device *dev, enum b2r2_blt_fmt fmt);
+int b2r2_get_fmt_y_bpp(struct device *dev, enum b2r2_blt_fmt fmt);
+
+bool b2r2_is_single_plane_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_independent_pixel_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcri_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcrsp_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcrp_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr420_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr422_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr444_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt);
+
+/*
+ * Rounds up if an invalid width causes the pitch to be non-byte-aligned.
+ */
+u32 b2r2_calc_pitch_from_width(struct device *dev,
+ s32 width, enum b2r2_blt_fmt fmt);
+u32 b2r2_get_img_pitch(struct device *dev,
+ struct b2r2_blt_img *img);
+s32 b2r2_get_img_size(struct device *dev,
+ struct b2r2_blt_img *img);
+
+s32 b2r2_div_round_up(s32 dividend, s32 divisor);
+bool b2r2_is_aligned(s32 value, s32 alignment);
+s32 b2r2_align_up(s32 value, s32 alignment);
+
+enum b2r2_ty b2r2_get_alpha_range(enum b2r2_blt_fmt fmt);
+u8 b2r2_get_alpha(enum b2r2_blt_fmt fmt, u32 pixel);
+u32 b2r2_set_alpha(enum b2r2_blt_fmt fmt, u8 alpha, u32 color);
+bool b2r2_fmt_has_alpha(enum b2r2_blt_fmt fmt);
+bool b2r2_is_rgb_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_bgr_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_yuv_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_yvu_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_yuv420_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_yuv422_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_yvu420_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_yvu422_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_yuv444_fmt(enum b2r2_blt_fmt fmt);
+int b2r2_fmt_byte_pitch(enum b2r2_blt_fmt fmt, u32 width);
+enum b2r2_native_fmt b2r2_to_native_fmt(enum b2r2_blt_fmt fmt);
+u32 b2r2_to_RGB888(u32 color, const enum b2r2_blt_fmt fmt);
+enum b2r2_fmt_type b2r2_get_fmt_type(enum b2r2_blt_fmt fmt);
+#ifdef CONFIG_DEBUG_FS
+int sprintf_req(struct b2r2_blt_request *request, char *buf, int size);
+#endif
+void b2r2_recalculate_rects(struct device *dev,
+ struct b2r2_blt_req *req);
+
+#endif
diff --git a/drivers/video/mcde/Kconfig b/drivers/video/mcde/Kconfig
new file mode 100644
index 00000000000..cb88a66d370
--- /dev/null
+++ b/drivers/video/mcde/Kconfig
@@ -0,0 +1,96 @@
+config FB_MCDE
+ tristate "MCDE support"
+ depends on FB
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select HWMEM
+ ---help---
+	  This enables support for the MCDE-based frame buffer driver.
+
+	  Please read the file <file:Documentation/fb/mcde.txt>.
+
+config FB_MCDE_DEBUG
+ bool "MCDE debug messages"
+ depends on FB_MCDE
+ ---help---
+ Say Y here if you want the MCDE driver to output debug messages
+
+config FB_MCDE_VDEBUG
+ bool "MCDE verbose debug messages"
+ depends on FB_MCDE_DEBUG
+ ---help---
+ Say Y here if you want the MCDE driver to output more debug messages
+
+config MCDE_FB_AVOID_REALLOC
+ bool "MCDE early allocate framebuffer"
+ default n
+ depends on FB_MCDE
+ ---help---
+	  If you say Y here, the maximum frame buffer size is allocated and
+	  used for all resolutions. If you say N here, the frame buffer is
+	  reallocated when the resolution changes. This reallocation might
+	  fail because of fragmented memory. Note that this memory is never
+	  deallocated while the MCDE frame buffer is in use.
+
+config MCDE_DISPLAY_DSI
+ bool "Support for DSI displays within MCDE"
+ depends on FB_MCDE
+ default y
+
+menu "MCDE DSI displays"
+ depends on MCDE_DISPLAY_DSI
+
+config MCDE_DISPLAY_GENERIC_DSI
+ tristate "Generic DSI display driver"
+
+config MCDE_DISPLAY_SAMSUNG_S6D16D0
+ bool "Samsung S6D16D0 DSI display driver"
+ ---help---
+	  Say Y if you have a Samsung S6D16D0 display panel.
+
+config MCDE_DISPLAY_SONY_ACX424AKP_DSI
+ tristate "Sony acx424akp DSI display driver"
+
+config MCDE_DISPLAY_AV8100
+ tristate "AV8100 HDMI/CVBS display driver"
+ select AV8100
+
+config MCDE_DISPLAY_HDMI_FB_AUTO_CREATE
+ bool "HDMI_FB_AUTO_CREATE"
+ default y
+ depends on MCDE_DISPLAY_AV8100
+ ---help---
+	  Say Y if you want the HDMI frame buffer to be created at startup.
+	  Say N if you want the HDMI frame buffer to be created when the HDMI
+	  cable is plugged in (needs the user-space HDMIservice).
+
+endmenu
+
+config MCDE_DISPLAY_DPI
+ bool "Support for DPI displays within MCDE"
+ depends on FB_MCDE
+ default n
+ ---help---
+	  Enable this option to choose which DPI display drivers for MCDE to
+	  include.
+
+ DPI (Display Pixel Interface) is a MIPI Alliance standard used for
+ active-matrix LCDs. The DPI uses parallel data lines.
+
+menu "MCDE DPI displays"
+ depends on MCDE_DISPLAY_DPI
+
+config MCDE_DISPLAY_VUIB500_DPI
+ tristate "DPI display driver for the VUIB500 board"
+ ---help---
+ The VUIB500 is an ST-Ericsson user interface board.
+
+endmenu
+
+config MCDE_DISPLAY_AB8500_DENC
+ tristate "AB8500 CVBS display driver"
+ depends on FB_MCDE
+ select AB8500_DENC
diff --git a/drivers/video/mcde/Makefile b/drivers/video/mcde/Makefile
new file mode 100644
index 00000000000..82a78c2542a
--- /dev/null
+++ b/drivers/video/mcde/Makefile
@@ -0,0 +1,23 @@
+mcde-objs += mcde_mod.o
+mcde-objs += mcde_hw.o
+mcde-objs += mcde_dss.o
+mcde-objs += mcde_display.o
+mcde-objs += mcde_bus.o
+mcde-objs += mcde_fb.o
+mcde-objs += mcde_debugfs.o
+obj-$(CONFIG_FB_MCDE) += mcde.o
+
+obj-$(CONFIG_MCDE_DISPLAY_GENERIC_DSI) += display-generic_dsi.o
+obj-$(CONFIG_MCDE_DISPLAY_SAMSUNG_S6D16D0) += display-samsung_s6d16d0.o
+obj-$(CONFIG_MCDE_DISPLAY_SONY_ACX424AKP_DSI) += display-sony_acx424akp_dsi.o
+obj-$(CONFIG_MCDE_DISPLAY_VUIB500_DPI) += display-vuib500-dpi.o
+obj-$(CONFIG_MCDE_DISPLAY_AB8500_DENC) += display-ab8500.o
+obj-$(CONFIG_MCDE_DISPLAY_AV8100) += display-av8100.o
+obj-$(CONFIG_DISPLAY_FICTIVE) += display-fictive.o
+
+ifdef CONFIG_FB_MCDE_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
+ifdef CONFIG_FB_MCDE_VDEBUG
+EXTRA_CFLAGS += -DVERBOSE_DEBUG
+endif
diff --git a/drivers/video/mcde/display-ab8500.c b/drivers/video/mcde/display-ab8500.c
new file mode 100644
index 00000000000..a761a5eec80
--- /dev/null
+++ b/drivers/video/mcde/display-ab8500.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * AB8500 display driver
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/mfd/ab8500/denc.h>
+#include <video/mcde_display.h>
+#include <video/mcde_display-ab8500.h>
+
+#define AB8500_DISP_TRACE dev_dbg(&ddev->dev, "%s\n", __func__)
+
+#define SDTV_PIXCLOCK 37037
+
+/*
+ * PAL:
+ * Total nr of active lines: 576
+ * Total nr of blanking lines: 49
+ * total: 625
+ */
+#define PAL_HBP 132
+#define PAL_HFP 12
+#define PAL_VBP_FIELD_1 22
+#define PAL_VBP_FIELD_2 23
+#define PAL_VFP_FIELD_1 2
+#define PAL_VFP_FIELD_2 2
+
+/*
+ * NTSC (ITU-R BT.470-5):
+ * Total nr of active lines: 486
+ * Total nr of blanking lines: 39
+ * total: 525
+ */
+#define NTSC_ORG_HBP 122
+#define NTSC_ORG_HFP 16
+#define NTSC_ORG_VBP_FIELD_1 16
+#define NTSC_ORG_VBP_FIELD_2 17
+#define NTSC_ORG_VFP_FIELD_1 3
+#define NTSC_ORG_VFP_FIELD_2 3
+
+/*
+ * NTSC (DV variant):
+ * Total nr of active lines: 480
+ * Total nr of blanking lines: 45
+ * total: 525
+ */
+#define NTSC_HBP 122
+#define NTSC_HFP 16
+#define NTSC_VBP_FIELD_1 19
+#define NTSC_VBP_FIELD_2 20
+#define NTSC_VFP_FIELD_1 3
+#define NTSC_VFP_FIELD_2 3
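+
+/*
+ * Sanity check (illustrative): active lines + blanking must match the
+ * totals quoted above, e.g. PAL: 576 + (22 + 23) + (2 + 2) = 625, and
+ * the NTSC DV variant: 480 + (19 + 20) + (3 + 3) = 525.
+ */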
+
+struct display_driver_data {
+ struct ab8500_denc_conf denc_conf;
+ struct platform_device *denc_dev;
+ int nr_regulators;
+ struct regulator **regulator;
+};
+
+static int try_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+static int set_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+static int set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode);
+static int on_first_update(struct mcde_display_device *ddev);
+static int display_update(struct mcde_display_device *ddev,
+ bool tripple_buffer);
+
+static int __devinit ab8500_probe(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ int i;
+ struct ab8500_display_platform_data *pdata = ddev->dev.platform_data;
+ struct display_driver_data *driver_data;
+
+ AB8500_DISP_TRACE;
+
+ if (pdata == NULL) {
+ dev_err(&ddev->dev, "%s:Platform data missing\n", __func__);
+ return -EINVAL;
+ }
+ if (ddev->port->type != MCDE_PORTTYPE_DPI) {
+ dev_err(&ddev->dev, "%s:Invalid port type %d\n", __func__,
+ ddev->port->type);
+ return -EINVAL;
+ }
+
+	driver_data = kzalloc(sizeof(struct display_driver_data), GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(&ddev->dev, "Failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+ driver_data->denc_dev = ab8500_denc_get_device();
+ if (!driver_data->denc_dev) {
+ dev_err(&ddev->dev, "Failed to get DENC device\n");
+ ret = -ENODEV;
+ goto dev_get_failed;
+ }
+
+ driver_data->regulator = kzalloc(pdata->nr_regulators *
+ sizeof(struct regulator *), GFP_KERNEL);
+ if (!driver_data->regulator) {
+ dev_err(&ddev->dev, "Failed to allocate regulator list\n");
+ ret = -ENOMEM;
+ goto reg_alloc_failed;
+ }
+ for (i = 0; i < pdata->nr_regulators; i++) {
+ driver_data->regulator[i] = regulator_get(&ddev->dev,
+ pdata->regulator_id[i]);
+ if (IS_ERR(driver_data->regulator[i])) {
+ ret = PTR_ERR(driver_data->regulator[i]);
+ dev_warn(&ddev->dev, "%s:Failed to get regulator %s\n",
+ __func__, pdata->regulator_id[i]);
+ goto regulator_get_failed;
+ }
+ }
+ driver_data->nr_regulators = pdata->nr_regulators;
+
+ dev_set_drvdata(&ddev->dev, driver_data);
+
+ ddev->try_video_mode = try_video_mode;
+ ddev->set_video_mode = set_video_mode;
+ ddev->set_power_mode = set_power_mode;
+ ddev->on_first_update = on_first_update;
+ ddev->update = display_update;
+
+ return 0;
+
+regulator_get_failed:
+ for (i--; i >= 0; i--)
+ regulator_put(driver_data->regulator[i]);
+ kfree(driver_data->regulator);
+ driver_data->regulator = NULL;
+reg_alloc_failed:
+ ab8500_denc_put_device(driver_data->denc_dev);
+dev_get_failed:
+ kfree(driver_data);
+ return ret;
+}
+
+static int __devexit ab8500_remove(struct mcde_display_device *ddev)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ AB8500_DISP_TRACE;
+
+ ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+
+ if (driver_data->regulator) {
+ int i;
+ for (i = driver_data->nr_regulators - 1; i >= 0; i--)
+ regulator_put(driver_data->regulator[i]);
+ kfree(driver_data->regulator);
+ driver_data->regulator = NULL;
+ driver_data->nr_regulators = 0;
+ }
+ ab8500_denc_put_device(driver_data->denc_dev);
+ kfree(driver_data);
+ return 0;
+}
+
+static int ab8500_resume(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ AB8500_DISP_TRACE;
+
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s: Failed to resume display\n",
+ __func__);
+
+ return ret;
+}
+
+static int ab8500_suspend(struct mcde_display_device *ddev, pm_message_t state)
+{
+ int ret = 0;
+ AB8500_DISP_TRACE;
+
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s: Failed to suspend display\n",
+ __func__);
+
+ return ret;
+}
+
+static struct mcde_display_driver ab8500_driver = {
+ .probe = ab8500_probe,
+ .remove = ab8500_remove,
+ .suspend = ab8500_suspend,
+ .resume = ab8500_resume,
+ .driver = {
+ .name = "mcde_tv_ab8500",
+ },
+};
+
+static void print_vmode(struct mcde_video_mode *vmode)
+{
+ pr_debug("resolution: %dx%d\n", vmode->xres, vmode->yres);
+ pr_debug(" pixclock: %d\n", vmode->pixclock);
+ pr_debug(" hbp: %d\n", vmode->hbp);
+ pr_debug(" hfp: %d\n", vmode->hfp);
+ pr_debug(" vbp: %d\n", vmode->vbp);
+ pr_debug(" vfp: %d\n", vmode->vfp);
+ pr_debug("interlaced: %s\n", vmode->interlaced ? "true" : "false");
+}
+
+static int try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+	if (ddev == NULL || video_mode == NULL) {
+		pr_warn("%s: ddev or video_mode is NULL\n", __func__);
+		return -EINVAL;
+	}
+	AB8500_DISP_TRACE;
+
+ if (video_mode->xres != 720) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to find video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+ /* TODO: move this part to MCDE: mcde_dss_try_video_mode? */
+ /* check for PAL */
+ switch (video_mode->yres) {
+ case 576:
+ /* set including SAV/EAV: */
+ video_mode->hbp = PAL_HBP;
+ video_mode->hfp = PAL_HFP;
+ video_mode->vbp = PAL_VBP_FIELD_1 + PAL_VBP_FIELD_2;
+ video_mode->vfp = PAL_VFP_FIELD_1 + PAL_VFP_FIELD_2;
+ video_mode->interlaced = true;
+ video_mode->pixclock = SDTV_PIXCLOCK;
+ break;
+ case 480:
+ /* set including SAV/EAV */
+ video_mode->hbp = NTSC_HBP;
+ video_mode->hfp = NTSC_HFP;
+ video_mode->vbp = NTSC_VBP_FIELD_1 + NTSC_VBP_FIELD_2;
+ video_mode->vfp = NTSC_VFP_FIELD_1 + NTSC_VFP_FIELD_2;
+ video_mode->interlaced = true;
+ video_mode->pixclock = SDTV_PIXCLOCK;
+ break;
+ case 486:
+ /* set including SAV/EAV */
+ video_mode->hbp = NTSC_ORG_HBP;
+ video_mode->hfp = NTSC_ORG_HFP;
+ video_mode->vbp = NTSC_ORG_VBP_FIELD_1 + NTSC_ORG_VBP_FIELD_2;
+ video_mode->vfp = NTSC_ORG_VFP_FIELD_1 + NTSC_ORG_VFP_FIELD_2;
+ video_mode->interlaced = true;
+ video_mode->pixclock = SDTV_PIXCLOCK;
+ break;
+ default:
+ dev_warn(&ddev->dev,
+ "%s:Failed to find video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+ print_vmode(video_mode);
+
+ return 0;
+}
+
+static int set_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+	int res;
+	struct ab8500_display_platform_data *pdata;
+	struct display_driver_data *driver_data;
+
+	if (ddev == NULL || video_mode == NULL) {
+		pr_warn("%s: ddev or video_mode is NULL\n", __func__);
+		return -EINVAL;
+	}
+	AB8500_DISP_TRACE;
+
+	pdata = ddev->dev.platform_data;
+	driver_data = dev_get_drvdata(&ddev->dev);
+
+ ddev->video_mode = *video_mode;
+
+ if (video_mode->xres != 720) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+ /* check for PAL BDGHI and N */
+ switch (video_mode->yres) {
+ case 576:
+ driver_data->denc_conf.TV_std = TV_STD_PAL_BDGHI;
+ /* TODO: how to choose LOW DEF FILTER */
+ driver_data->denc_conf.cr_filter = TV_CR_PAL_HIGH_DEF_FILTER;
+ /* TODO: PAL N (e.g. uses a setup of 7.5 IRE) */
+ driver_data->denc_conf.black_level_setup = false;
+ break;
+ case 480: /* NTSC, PAL M DV variant */
+ case 486: /* NTSC, PAL M original */
+ /* TODO: PAL M */
+ driver_data->denc_conf.TV_std = TV_STD_NTSC_M;
+ /* TODO: how to choose LOW DEF FILTER */
+ driver_data->denc_conf.cr_filter = TV_CR_NTSC_HIGH_DEF_FILTER;
+ driver_data->denc_conf.black_level_setup = true;
+ break;
+ default:
+ dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+
+ driver_data->denc_conf.progressive = !video_mode->interlaced;
+ driver_data->denc_conf.act_output = true;
+ driver_data->denc_conf.test_pattern = false;
+ driver_data->denc_conf.partial_blanking = true;
+ driver_data->denc_conf.blank_all = false;
+ driver_data->denc_conf.suppress_col = false;
+ driver_data->denc_conf.phase_reset_mode = TV_PHASE_RST_MOD_DISABLE;
+ driver_data->denc_conf.dac_enable = false;
+ driver_data->denc_conf.act_dc_output = true;
+
+ set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (pdata->rgb_2_yCbCr_transform)
+ mcde_chnl_set_col_convert(ddev->chnl_state,
+ pdata->rgb_2_yCbCr_transform,
+ MCDE_CONVERT_RGB_2_YCBCR);
+ mcde_chnl_stop_flow(ddev->chnl_state);
+ res = mcde_chnl_set_video_mode(ddev->chnl_state, &ddev->video_mode);
+ if (res < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode on channel\n",
+ __func__);
+
+ return res;
+ }
+ ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+
+ return 0;
+}
+
+static int set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+ int i;
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ AB8500_DISP_TRACE;
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ dev_dbg(&ddev->dev, "off -> standby\n");
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ goto error;
+ }
+ if (driver_data->regulator) {
+ for (i = 0; i < driver_data->nr_regulators; i++) {
+ ret = regulator_enable(
+ driver_data->regulator[i]);
+ if (ret)
+ goto off_to_standby_failed;
+ dev_dbg(&ddev->dev, "regulator %d on\n", i);
+ }
+ }
+ ab8500_denc_power_up(driver_data->denc_dev);
+ ab8500_denc_reset(driver_data->denc_dev, true);
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+ dev_dbg(&ddev->dev, "standby -> on\n");
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ }
+ /* ON -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ dev_dbg(&ddev->dev, "on -> standby\n");
+ ab8500_denc_reset(driver_data->denc_dev, false);
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ bool error = false;
+ dev_dbg(&ddev->dev, "standby -> off\n");
+ if (driver_data->regulator) {
+ for (i = 0; i < driver_data->nr_regulators; i++) {
+ ret = regulator_disable(
+ driver_data->regulator[i]);
+ /* continue in case of an error */
+ error |= (ret != 0);
+ dev_dbg(&ddev->dev, "regulator %d off\n", i);
+ }
+ }
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ error |= (ret != 0);
+ }
+ if (error) {
+ /* the latest error code is returned */
+ goto error;
+ }
+ memset(&(ddev->video_mode), 0, sizeof(struct mcde_video_mode));
+ ab8500_denc_power_down(driver_data->denc_dev);
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ return 0;
+
+ /* In case of an error, try to leave in off-state */
+off_to_standby_failed:
+ for (i--; i >= 0; i--)
+ regulator_disable(driver_data->regulator[i]);
+	if (ddev->platform_disable)
+		ddev->platform_disable(ddev);
+
+error:
+ dev_err(&ddev->dev, "Failed to set power mode");
+ return ret;
+}
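+
+/*
+ * Note (illustrative): the transitions above chain within one call, so
+ * set_power_mode(ddev, MCDE_DISPLAY_PM_ON) from MCDE_DISPLAY_PM_OFF first
+ * performs OFF -> STANDBY and then STANDBY -> ON in the same invocation.
+ */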
+
+static int on_first_update(struct mcde_display_device *ddev)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ ab8500_denc_conf(driver_data->denc_dev, &driver_data->denc_conf);
+ ab8500_denc_conf_plug_detect(driver_data->denc_dev, true, false,
+ TV_PLUG_TIME_2S);
+ ab8500_denc_mask_int_plug_det(driver_data->denc_dev, false, false);
+ ddev->first_update = false;
+ return 0;
+}
+
+static int display_update(struct mcde_display_device *ddev, bool tripple_buffer)
+{
+ int ret;
+
+ if (ddev->first_update)
+ on_first_update(ddev);
+ if (ddev->power_mode != MCDE_DISPLAY_PM_ON && ddev->set_power_mode) {
+ ret = set_power_mode(ddev, MCDE_DISPLAY_PM_ON);
+ if (ret < 0)
+ goto error;
+ }
+ ret = mcde_chnl_update(ddev->chnl_state, &ddev->update_area,
+ tripple_buffer);
+ if (ret < 0)
+ goto error;
+out:
+ return ret;
+error:
+ dev_warn(&ddev->dev, "%s:Failed to set power mode to on\n", __func__);
+ goto out;
+}
+
+/* Module init */
+static int __init mcde_display_tvout_ab8500_init(void)
+{
+ pr_debug("%s\n", __func__);
+
+ return mcde_display_driver_register(&ab8500_driver);
+}
+late_initcall(mcde_display_tvout_ab8500_init);
+
+static void __exit mcde_display_tvout_ab8500_exit(void)
+{
+ pr_debug("%s\n", __func__);
+
+ mcde_display_driver_unregister(&ab8500_driver);
+}
+module_exit(mcde_display_tvout_ab8500_exit);
+
+MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE TVout through AB8500 display driver");
diff --git a/drivers/video/mcde/display-av8100.c b/drivers/video/mcde/display-av8100.c
new file mode 100644
index 00000000000..70750998824
--- /dev/null
+++ b/drivers/video/mcde/display-av8100.c
@@ -0,0 +1,1656 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson HDMI display driver
+ *
+ * Author: Per Persson <per-xb-persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/compdev.h>
+#include <linux/clonedev.h>
+
+#include <video/mcde_fb.h>
+#include <video/mcde_display.h>
+#include <video/mcde_display-av8100.h>
+#include <video/av8100.h>
+#include <video/hdmi.h>
+
+#define SWITCH_HELPSTR ", 0=HDMI, 1=SDTV, 2=DVI\n"
+
+/* AVI Infoframe */
+#define AVI_INFOFRAME_DATA_SIZE 13
+#define AVI_INFOFRAME_TYPE 0x82
+#define AVI_INFOFRAME_VERSION 0x02
+#define AVI_INFOFRAME_DB1 0x10 /* Active Information present */
+#define AVI_INFOFRAME_DB2 0x08 /* Active Portion Aspect ratio */
+
+#ifdef CONFIG_DISPLAY_AV8100_TRIPPLE_BUFFER
+#define NUM_FB_BUFFERS 3
+#else
+#define NUM_FB_BUFFERS 2
+#endif
+
+#define DSI_HS_FREQ_HZ 840320000
+#define DSI_LP_FREQ_HZ 19200000
+
+struct cea_vesa_video_mode {
+ u32 cea;
+ u32 vesa_cea_nr;
+ struct mcde_video_mode *video_mode;
+};
+
+static int hdmi_try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode);
+static int hdmi_set_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode);
+static int hdmi_set_pixel_format(
+ struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format);
+static struct mcde_video_mode *video_mode_get(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr);
+static int ceanr_convert(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr, u16 *w, u16 *h);
+
+static ssize_t show_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_disponoff(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_disponoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t show_vesacea(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t show_timing(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t store_timing(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static ssize_t store_stayalive(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static DEVICE_ATTR(disponoff, S_IRUGO | S_IWUSR, show_disponoff,
+ store_disponoff);
+static DEVICE_ATTR(vesacea, S_IRUGO, show_vesacea, NULL);
+static DEVICE_ATTR(timing, S_IRUGO | S_IWUSR, show_timing, store_timing);
+static DEVICE_ATTR(stayalive, S_IWUSR, NULL, store_stayalive);
+
+static DEVICE_ATTR(hdmisdtvswitch, S_IRUGO | S_IWUSR, show_hdmisdtvswitch,
+ store_hdmisdtvswitch);
+static DEVICE_ATTR(input_pixel_format, S_IRUGO | S_IWUSR,
+ show_input_pixel_format, store_input_pixel_format);
+
+static ssize_t show_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *mdev = to_mcde_display_device(dev);
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	/* include the terminating NUL, as the other attributes here do */
+	return sprintf(buf, "%1x%s", mdev->port->hdmi_sdtv_switch,
+						SWITCH_HELPSTR) + 1;
+}
+
+static ssize_t store_hdmisdtvswitch(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *mdev = to_mcde_display_device(dev);
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (count > 0) {
+ if ((*buf == 0) || (*buf == '0')) {
+ dev_dbg(dev, "hdmi/sdtv switch = hdmi\n");
+ mdev->port->hdmi_sdtv_switch = HDMI_SWITCH;
+ mdev->native_x_res = NATIVE_XRES_HDMI;
+ mdev->native_y_res = NATIVE_YRES_HDMI;
+ } else if ((*buf == 1) || (*buf == '1')) {
+ dev_dbg(dev, "hdmi/sdtv switch = sdtv\n");
+ mdev->port->hdmi_sdtv_switch = SDTV_SWITCH;
+ mdev->native_x_res = NATIVE_XRES_SDTV;
+ mdev->native_y_res = NATIVE_YRES_SDTV;
+ } else if ((*buf == 2) || (*buf == '2')) {
+ dev_dbg(dev, "hdmi/sdtv switch = dvi\n");
+ mdev->port->hdmi_sdtv_switch = DVI_SWITCH;
+ mdev->native_x_res = NATIVE_XRES_HDMI;
+ mdev->native_y_res = NATIVE_YRES_HDMI;
+ }
+		/* implicitly read by a memcmp in dss */
+ mdev->video_mode.force_update = true;
+ }
+
+ return count;
+}
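+
+/*
+ * Illustrative usage only (the exact sysfs path depends on how the
+ * display device is registered on the target):
+ *
+ *	echo 1 > /sys/devices/.../hdmisdtvswitch
+ *
+ * selects SDTV output and forces a mode update on the next apply,
+ * since dss detects changes with a memcmp of the video mode.
+ */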
+
+static ssize_t show_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+
+ return sprintf(buf, "%d\n", ddev->port->pixel_format);
+}
+
+static ssize_t store_input_pixel_format(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+ if (count > 0) {
+ unsigned long input;
+ if (strict_strtoul(buf, 10, &input) != 0)
+ return -EINVAL;
+ switch (input) {
+		/* all supported port formats share the same assignment */
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ ddev->port->pixel_format = input;
+ break;
+		default:
+			dev_warn(&ddev->dev, "invalid format (%ld)\n",
+					input);
+			return -EINVAL;
+ }
+		/* implicitly read by a memcmp in dss */
+ ddev->video_mode.force_update = true;
+ driver_data->update_port_pixel_format = true;
+ }
+
+ return count;
+}
+
+static ssize_t show_disponoff(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (ddev->fbi && driver_data->fbdevname) {
+ dev_dbg(dev, "name:%s\n", driver_data->fbdevname);
+ strcpy(buf, driver_data->fbdevname);
+ return strlen(driver_data->fbdevname) + 1;
+ }
+ return 0;
+}
+
+static ssize_t store_disponoff(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *mdev = to_mcde_display_device(dev);
+ bool enable = false;
+ u8 cea = 0;
+ u8 vesa_cea_nr = 0;
+#ifdef CONFIG_COMPDEV
+ struct mcde_fb *mfb;
+#endif
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if ((count != DISPONOFF_SIZE) && (count != DISPONOFF_SIZE + 1))
+ return -EINVAL;
+
+ if ((*buf == '0') && (*(buf + 1) == '1'))
+ enable = true;
+ cea = (hex_to_bin(buf[2]) << 4) + hex_to_bin(buf[3]);
+ vesa_cea_nr = (hex_to_bin(buf[4]) << 4) + hex_to_bin(buf[5]);
+ dev_dbg(dev, "enable:%d cea:%d nr:%d\n", enable, cea, vesa_cea_nr);
+
+ if (enable && !mdev->fbi) {
+ struct display_driver_data *driver_data = dev_get_drvdata(dev);
+ u16 w = mdev->native_x_res;
+ u16 h = mdev->native_y_res, vh;
+ int buffering = NUM_FB_BUFFERS;
+ struct fb_info *fbi;
+
+ ceanr_convert(mdev, cea, vesa_cea_nr, &w, &h);
+ vh = h * buffering;
+ fbi = mcde_fb_create(mdev, w, h, w, vh,
+ mdev->default_pixel_format, FB_ROTATE_UR);
+		if (IS_ERR(fbi)) {
+			dev_warn(dev, "fb create failed\n");
+			/* fbi must not be dereferenced below on failure */
+			return PTR_ERR(fbi);
+		}
+		driver_data->fbdevname = dev_name(fbi->dev);
+
+#ifdef CONFIG_COMPDEV
+ /* TODO need another way for compdev to get actual size */
+ mdev->native_x_res = w;
+ mdev->native_y_res = h;
+
+ mfb = to_mcde_fb(fbi);
+ /* Create a compdev overlay for this display */
+ if (compdev_create(mdev, mfb->ovlys[0], false) < 0) {
+ dev_warn(&mdev->dev,
+ "Failed to create compdev for display %s\n",
+ mdev->name);
+ } else {
+ dev_dbg(&mdev->dev, "compdev created for (%s)\n",
+ mdev->name);
+ }
+#ifdef CONFIG_CLONEDEV
+ if (clonedev_create()) {
+ dev_warn(&mdev->dev,
+ "Failed to create clonedev for display %s\n",
+ mdev->name);
+ } else {
+ dev_dbg(&mdev->dev, "clonedev created for (%s)\n",
+ mdev->name);
+ }
+#endif
+#endif
+ } else if (!enable && mdev->fbi) {
+#ifdef CONFIG_CLONEDEV
+ clonedev_destroy();
+#endif
+#ifdef CONFIG_COMPDEV
+ compdev_destroy(mdev);
+#endif
+ mcde_fb_destroy(mdev);
+ }
+
+ return count;
+}
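+
+/*
+ * The disponoff string is parsed as "EECCVV" (DISPONOFF_SIZE bytes,
+ * presumably six, optionally NUL terminated): "01" enables, anything
+ * else disables, followed by the CEA flag and the VESA/CEA mode number
+ * as two hex digits each. As an illustrative example, writing "010104"
+ * would create a frame buffer for CEA mode 4 (1280x720p60); accepted
+ * modes are those listed in cea_vesa_video_mode[] below.
+ */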
+
+static ssize_t show_timing(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ struct mcde_video_mode *video_mode;
+ int index;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ index = 0;
+ if (driver_data->video_mode) {
+ video_mode = driver_data->video_mode;
+ memcpy(buf + index, &video_mode->xres, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->yres, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->pixclock, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->hbp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->hfp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->vbp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->vfp, sizeof(u32));
+ index += sizeof(u32);
+ memcpy(buf + index, &video_mode->interlaced, sizeof(u32));
+ index += sizeof(u32);
+ }
+ return index;
+}
+
+static ssize_t store_timing(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (count != TIMING_SIZE)
+ return -EINVAL;
+
+ driver_data->video_mode = video_mode_get(ddev, *buf, *(buf + 1));
+
+ return count;
+}
+
+static ssize_t store_stayalive(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+
+ if (count != STAYALIVE_SIZE)
+ return -EINVAL;
+
+ if ((*buf == 1) || (*buf == '1'))
+ ddev->stay_alive = true;
+ else
+ ddev->stay_alive = false;
+
+ dev_dbg(dev, "%s %d\n", __func__, ddev->stay_alive);
+
+ return count;
+}
+
+static int ceanr_convert(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr, u16 *w, u16 *h)
+{
+ struct mcde_video_mode *video_mode;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ video_mode = video_mode_get(ddev, cea, vesa_cea_nr);
+ if (video_mode) {
+ *w = video_mode->xres;
+ *h = video_mode->yres;
+ dev_dbg(&ddev->dev, "cea:%d nr:%d found\n",
+ cea, vesa_cea_nr);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
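+/*
+ * Note on units: pixclock follows the fbdev convention of picoseconds
+ * per pixel. Worked example, assuming standard CEA timing: 1280x720p60
+ * has a 1650x750 total raster, so the pixel clock is
+ * 1650 * 750 * 60 = 74.25 MHz and pixclock = 10^12 / 74250000 ~= 13468,
+ * which matches entry 3 below. hbp/hfp and vbp/vfp are the blanking
+ * intervals, which here appear to fold the sync widths into the porches.
+ */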
+/* Supported HDMI modes */
+static struct mcde_video_mode video_modes_supp_hdmi[] = {
+ /* 0 CEA #1 640_480_60_P */
+ {
+ .xres = 640, .yres = 480,
+ .pixclock = 39682,
+ .hbp = 112, .hfp = 48,
+ .vbp = 33, .vfp = 12
+ },
+ /* 1 720_480_60_P */
+ {
+ .xres = 720, .yres = 480,
+ .pixclock = 37000,
+ .hbp = 104, .hfp = 34,
+ .vbp = 30, .vfp = 15
+ },
+ /* 2 720_576_50_P */
+ {
+ .xres = 720, .yres = 576,
+ .pixclock = 37037,
+ .hbp = 132, .hfp = 12,
+ .vbp = 44, .vfp = 5
+ },
+ /* 3 1280_720_60_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 256, .hfp = 114,
+ .vbp = 20, .vfp = 10
+ },
+ /* 4 1280_720_50_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 260, .hfp = 440,
+ .vbp = 25, .vfp = 5
+ },
+ /* 5 1280_720_30_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 260, .hfp = 1760,
+ .vbp = 20, .vfp = 10
+ },
+ /* 6 1280_720_24_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 16835,
+ .hbp = 260, .hfp = 1760,
+ .vbp = 20, .vfp = 10
+ },
+ /* 7 1280_720_25_P */
+ {
+ .xres = 1280, .yres = 720,
+ .pixclock = 13468,
+ .hbp = 260, .hfp = 2420,
+ .vbp = 20, .vfp = 10
+ },
+ /* 8 1920_1080_30_P */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 189, .hfp = 91,
+ .vbp = 36, .vfp = 9
+ },
+ /* 9 1920_1080_24_P */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 170, .hfp = 660,
+ .vbp = 36, .vfp = 9
+ },
+ /* 10 1920_1080_25_P */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 192, .hfp = 528,
+ .vbp = 36, .vfp = 9
+ },
+ /* 11 720_480_60_I */
+ {
+ .xres = 720, .yres = 480,
+ .pixclock = 74074,
+ .hbp = 126, .hfp = 12,
+ .vbp = 44, .vfp = 1,
+ .interlaced = true,
+ },
+ /* 12 720_576_50_I */
+ {
+ .xres = 720, .yres = 576,
+ .pixclock = 74074,
+ .hbp = 132, .hfp = 12,
+ .vbp = 44, .vfp = 5,
+ .interlaced = true,
+ },
+ /* 13 1920_1080_50_I */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 192, .hfp = 528,
+ .vbp = 20, .vfp = 25,
+ .interlaced = true,
+ },
+ /* 14 1920_1080_60_I */
+ {
+ .xres = 1920, .yres = 1080,
+ .pixclock = 13468,
+ .hbp = 192, .hfp = 88,
+ .vbp = 20, .vfp = 25,
+ .interlaced = true,
+ },
+ /* 15 VESA #9 800_600_60_P */
+ {
+ .xres = 800, .yres = 600,
+ .pixclock = 25000,
+ .hbp = 168, .hfp = 88,
+ .vbp = 23, .vfp = 5,
+ .interlaced = false,
+ },
+ /* 16 VESA #14 848_480_60_P */
+ {
+ .xres = 848, .yres = 480,
+ .pixclock = 29630,
+ .hbp = 128, .hfp = 112,
+ .vbp = 23, .vfp = 14,
+ .interlaced = false,
+ },
+ /* 17 VESA #16 1024_768_60_P */
+ {
+ .xres = 1024, .yres = 768,
+ .pixclock = 15385,
+ .hbp = 160, .hfp = 160,
+ .vbp = 29, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 18 VESA #22 1280_768_60_P */
+ {
+ .xres = 1280, .yres = 768,
+ .pixclock = 14652,
+ .hbp = 80, .hfp = 80,
+ .vbp = 12, .vfp = 10,
+ .interlaced = false,
+ },
+ /* 19 VESA #23 1280_768_60_P */
+ {
+ .xres = 1280, .yres = 768,
+ .pixclock = 12579,
+ .hbp = 192, .hfp = 192,
+ .vbp = 20, .vfp = 10,
+ .interlaced = false,
+ },
+ /* 20 VESA #27 1280_800_60_P */
+ {
+ .xres = 1280, .yres = 800,
+ .pixclock = 14085,
+ .hbp = 80, .hfp = 80,
+ .vbp = 14, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 21 VESA #28 1280_800_60_P */
+ {
+ .xres = 1280, .yres = 800,
+ .pixclock = 11976,
+ .hbp = 200, .hfp = 200,
+ .vbp = 22, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 22 VESA #39 1360_768_60_P */
+ {
+ .xres = 1360, .yres = 768,
+ .pixclock = 11696,
+ .hbp = 176, .hfp = 256,
+ .vbp = 18, .vfp = 9,
+ .interlaced = false,
+ },
+ /* 23 VESA #81 1366_768_60_P */
+ {
+ .xres = 1366, .yres = 768,
+ .pixclock = 11662,
+ .hbp = 213, .hfp = 213,
+ .vbp = 24, .vfp = 6,
+ .interlaced = false,
+ },
+};
+
+/* Supported TVout modes */
+static struct mcde_video_mode video_modes_supp_sdtv[] = {
+	/* 720_480_60_I */
+ {
+ .xres = 720, .yres = 480,
+ .pixclock = 74074,
+ .hbp = 126, .hfp = 12,
+ .vbp = 44, .vfp = 1,
+ .interlaced = true,
+ },
+	/* 720_576_50_I */
+ {
+ .xres = 720, .yres = 576,
+ .pixclock = 74074,
+ .hbp = 132, .hfp = 12,
+ .vbp = 44, .vfp = 5,
+ .interlaced = true,
+ },
+};
+
+static struct cea_vesa_video_mode cea_vesa_video_mode[] = {
+ /* 640_480_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 1,
+ .video_mode = &video_modes_supp_hdmi[0],
+ },
+ /* 720_480_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 2,
+ .video_mode = &video_modes_supp_hdmi[1],
+ },
+ /* 720_480_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 3,
+ .video_mode = &video_modes_supp_hdmi[1],
+ },
+ /* 720_576_50_P */
+ {
+ .cea = 1, .vesa_cea_nr = 17,
+ .video_mode = &video_modes_supp_hdmi[2],
+ },
+ /* 720_576_50_P */
+ {
+ .cea = 1, .vesa_cea_nr = 18,
+ .video_mode = &video_modes_supp_hdmi[2],
+ },
+ /* 1280_720_60_P */
+ {
+ .cea = 1, .vesa_cea_nr = 4,
+ .video_mode = &video_modes_supp_hdmi[3],
+ },
+ /* 1280_720_50_P */
+ {
+ .cea = 1, .vesa_cea_nr = 19,
+ .video_mode = &video_modes_supp_hdmi[4],
+ },
+ /* 1280_720_30_P */
+ {
+ .cea = 1, .vesa_cea_nr = 62,
+ .video_mode = &video_modes_supp_hdmi[5],
+ },
+ /* 1280_720_24_P */
+ {
+ .cea = 1, .vesa_cea_nr = 60,
+ .video_mode = &video_modes_supp_hdmi[6],
+ },
+ /* 1280_720_25_P */
+ {
+ .cea = 1, .vesa_cea_nr = 61,
+ .video_mode = &video_modes_supp_hdmi[7],
+ },
+ /* 1920_1080_30_P */
+ {
+ .cea = 1, .vesa_cea_nr = 34,
+ .video_mode = &video_modes_supp_hdmi[8],
+ },
+ /* 1920_1080_24_P */
+ {
+ .cea = 1, .vesa_cea_nr = 32,
+ .video_mode = &video_modes_supp_hdmi[9],
+ },
+ /* 1920_1080_25_P */
+ {
+ .cea = 1, .vesa_cea_nr = 33,
+ .video_mode = &video_modes_supp_hdmi[10],
+ },
+	/* 720_480_60_I */
+ {
+ .cea = 1, .vesa_cea_nr = 6,
+ .video_mode = &video_modes_supp_hdmi[11],
+ },
+	/* 720_480_60_I */
+ {
+ .cea = 1, .vesa_cea_nr = 7,
+ .video_mode = &video_modes_supp_hdmi[11],
+ },
+	/* 720_576_50_I */
+ {
+ .cea = 1, .vesa_cea_nr = 21,
+ .video_mode = &video_modes_supp_hdmi[12],
+ },
+	/* 720_576_50_I */
+ {
+ .cea = 1, .vesa_cea_nr = 22,
+ .video_mode = &video_modes_supp_hdmi[12],
+ },
+	/* 1920_1080_50_I */
+ {
+ .cea = 1, .vesa_cea_nr = 20,
+ .video_mode = &video_modes_supp_hdmi[13],
+ },
+	/* 1920_1080_60_I */
+ {
+ .cea = 1, .vesa_cea_nr = 5,
+ .video_mode = &video_modes_supp_hdmi[14],
+ },
+	/* VESA #4 640_480_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 4,
+ .video_mode = &video_modes_supp_hdmi[0],
+ },
+	/* VESA #9 800_600_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 9,
+ .video_mode = &video_modes_supp_hdmi[15],
+ },
+	/* VESA #14 848_480_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 14,
+ .video_mode = &video_modes_supp_hdmi[16],
+ },
+	/* VESA #16 1024_768_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 16,
+ .video_mode = &video_modes_supp_hdmi[17],
+ },
+	/* VESA #22 1280_768_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 22,
+ .video_mode = &video_modes_supp_hdmi[18],
+ },
+	/* VESA #23 1280_768_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 23,
+ .video_mode = &video_modes_supp_hdmi[19],
+ },
+	/* VESA #27 1280_800_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 27,
+ .video_mode = &video_modes_supp_hdmi[20],
+ },
+	/* VESA #28 1280_800_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 28,
+ .video_mode = &video_modes_supp_hdmi[21],
+ },
+	/* VESA #39 1360_768_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 39,
+ .video_mode = &video_modes_supp_hdmi[22],
+ },
+	/* VESA #81 1366_768_60_P */
+ {
+ .cea = 0, .vesa_cea_nr = 81,
+ .video_mode = &video_modes_supp_hdmi[23],
+ },
+};
+
+static ssize_t show_vesacea(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int findex;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ for (findex = 0; findex < ARRAY_SIZE(cea_vesa_video_mode); findex++) {
+ *(buf + findex * 2) = cea_vesa_video_mode[findex].cea;
+ *(buf + findex * 2 + 1) =
+ cea_vesa_video_mode[findex].vesa_cea_nr;
+ }
+ *(buf + findex * 2) = '\0';
+
+ return findex * 2 + 1;
+}
+
+static struct mcde_video_mode *video_mode_get(struct mcde_display_device *ddev,
+ u8 cea, u8 vesa_cea_nr)
+{
+ int findex;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+
+ for (findex = 0; findex < ARRAY_SIZE(cea_vesa_video_mode); findex++)
+ if ((cea == cea_vesa_video_mode[findex].cea) &&
+ (vesa_cea_nr ==
+ cea_vesa_video_mode[findex].vesa_cea_nr)) {
+ dev_dbg(&ddev->dev, "cea:%d nr:%d\n", cea, vesa_cea_nr);
+ return cea_vesa_video_mode[findex].video_mode;
+ }
+
+ return NULL;
+}
+
+static u8 ceanr_get(struct mcde_display_device *ddev)
+{
+ int cnt;
+ int cea;
+ int vesa_cea_nr;
+ struct mcde_video_mode *vmode = &ddev->video_mode;
+ struct mcde_video_mode *vmode_try;
+
+ if (!vmode)
+ return 0;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+
+ for (cnt = 0; cnt < ARRAY_SIZE(cea_vesa_video_mode); cnt++) {
+ vmode_try = cea_vesa_video_mode[cnt].video_mode;
+ cea = cea_vesa_video_mode[cnt].cea;
+ vesa_cea_nr = cea_vesa_video_mode[cnt].vesa_cea_nr;
+
+ if (cea && vmode_try->xres == vmode->xres &&
+ vmode_try->yres == vmode->yres &&
+ vmode_try->pixclock == vmode->pixclock &&
+ vmode_try->hbp == vmode->hbp &&
+ vmode_try->hfp == vmode->hfp &&
+ vmode_try->vbp == vmode->vbp &&
+ vmode_try->vfp == vmode->vfp &&
+ vmode_try->interlaced == vmode->interlaced) {
+ dev_dbg(&ddev->dev, "ceanr:%d\n", vesa_cea_nr);
+ return vesa_cea_nr;
+ }
+ }
+
+ return 0;
+}
+
+#define AV8100_MAX_LEVEL 255
+
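+/*
+ * Find the best supported mode for the requested one. A lower
+ * match_level is better: level 1 is an exact match, levels 2 to 4
+ * progressively ignore pixclock and the blanking totals. The loop keeps
+ * the best candidate seen so far and stops early on an exact match.
+ */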
+static int hdmi_try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ int index = 0;
+ int match_level = AV8100_MAX_LEVEL;
+ int found_index = -1;
+ struct mcde_video_mode *video_modes_supp;
+ int array_size;
+
+ if (ddev == NULL || video_mode == NULL) {
+ pr_warning("%s:ddev = NULL or video_mode = NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_vdbg(&ddev->dev, "%s\n", __func__);
+
+ if (ddev->port->hdmi_sdtv_switch == SDTV_SWITCH) {
+ video_mode->interlaced = true;
+ video_modes_supp = video_modes_supp_sdtv;
+ array_size = ARRAY_SIZE(video_modes_supp_sdtv);
+ } else {
+ video_modes_supp = video_modes_supp_hdmi;
+ array_size = ARRAY_SIZE(video_modes_supp_hdmi);
+ }
+
+ while (index < array_size) {
+ /* 1. Check if all parameters match */
+ if ((video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ ((video_mode->xres + video_mode->hbp +
+ video_mode->hfp) ==
+ (video_modes_supp[index].xres +
+ video_modes_supp[index].hbp +
+ video_modes_supp[index].hfp)) &&
+ ((video_mode->yres + video_mode->vbp + video_mode->vfp)
+ ==
+ (video_modes_supp[index].yres +
+ video_modes_supp[index].vbp +
+ video_modes_supp[index].vfp)) &&
+ (video_mode->pixclock ==
+ video_modes_supp[index].pixclock) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced)) {
+ match_level = 1;
+ found_index = index;
+ break;
+ }
+
+ /* 2. Check if xres,yres,htot,vtot,interlaced match */
+ if ((match_level > 2) &&
+ (video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ ((video_mode->xres + video_mode->hbp +
+ video_mode->hfp) ==
+ (video_modes_supp[index].xres +
+ video_modes_supp[index].hbp +
+ video_modes_supp[index].hfp)) &&
+ ((video_mode->yres + video_mode->vbp + video_mode->vfp)
+ ==
+ (video_modes_supp[index].yres +
+ video_modes_supp[index].vbp +
+ video_modes_supp[index].vfp)) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced)) {
+ match_level = 2;
+ found_index = index;
+ }
+
+ /* 3. Check if xres,yres,pixelclock,interlaced match */
+ if ((match_level > 3) &&
+ (video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced) &&
+ (video_mode->pixclock ==
+ video_modes_supp[index].pixclock)) {
+ match_level = 3;
+ found_index = index;
+ }
+
+ /* 4. Check if xres,yres,interlaced match */
+ if ((match_level > 4) &&
+ (video_mode->xres == video_modes_supp[index].xres) &&
+ (video_mode->yres == video_modes_supp[index].yres) &&
+ (video_mode->interlaced ==
+ video_modes_supp[index].interlaced)) {
+ match_level = 4;
+ found_index = index;
+ }
+
+ index++;
+ }
+
+ if (found_index == -1) {
+ dev_dbg(&ddev->dev, "video_mode not accepted\n");
+ dev_dbg(&ddev->dev, "xres:%d yres:%d pixclock:%d hbp:%d hfp:%d "
+ "vfp:%d vbp:%d intlcd:%d\n",
+ video_mode->xres, video_mode->yres,
+ video_mode->pixclock,
+ video_mode->hbp, video_mode->hfp,
+ video_mode->vfp, video_mode->vbp,
+ video_mode->interlaced);
+ return -EINVAL;
+ }
+
+ memset(video_mode, 0, sizeof(struct mcde_video_mode));
+ memcpy(video_mode, &video_modes_supp[found_index],
+ sizeof(struct mcde_video_mode));
+
+	dev_dbg(&ddev->dev, "%s:video_mode %d chosen. Level:%d\n",
+		__func__, found_index, match_level);
+
+ return 0;
+}
+
+static int hdmi_set_video_mode(
+ struct mcde_display_device *dev, struct mcde_video_mode *video_mode)
+{
+ int ret;
+ union av8100_configuration av8100_config;
+ struct mcde_display_hdmi_platform_data *pdata;
+ struct display_driver_data *driver_data;
+ struct av8100_status status;
+
+ /* TODO check video_mode_params */
+ if (dev == NULL || video_mode == NULL) {
+		pr_warning("%s:dev = NULL or video_mode = NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ pdata = dev->dev.platform_data;
+ driver_data = dev_get_drvdata(&dev->dev);
+
+ dev_dbg(&dev->dev, "%s:\n", __func__);
+ dev_vdbg(&dev->dev, "%s:xres:%d yres:%d hbp:%d hfp:%d vbp:%d vfp:%d "
+ "interlaced:%d\n", __func__,
+ video_mode->xres,
+ video_mode->yres,
+ video_mode->hbp,
+ video_mode->hfp,
+ video_mode->vbp,
+ video_mode->vfp,
+ video_mode->interlaced);
+
+ if (driver_data->update_port_pixel_format) {
+ hdmi_set_pixel_format(dev, dev->pixel_format);
+ driver_data->update_port_pixel_format = false;
+ }
+
+ memset(&(dev->video_mode), 0, sizeof(struct mcde_video_mode));
+ memcpy(&(dev->video_mode), video_mode, sizeof(struct mcde_video_mode));
+
+ if (dev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422 &&
+ pdata->rgb_2_yCbCr_transform)
+ mcde_chnl_set_col_convert(dev->chnl_state,
+ pdata->rgb_2_yCbCr_transform,
+ MCDE_CONVERT_RGB_2_YCBCR);
+ mcde_chnl_stop_flow(dev->chnl_state);
+
+ ret = mcde_chnl_set_video_mode(dev->chnl_state, &dev->video_mode);
+ if (ret < 0) {
+ dev_warn(&dev->dev, "Failed to set video mode\n");
+ return ret;
+ }
+
+ status = av8100_status_get();
+ if (status.av8100_state == AV8100_OPMODE_UNDEFINED)
+ return -EINVAL;
+
+ if (av8100_ver_get() == AV8100_CHIPVER_1) {
+ if (status.av8100_state >= AV8100_OPMODE_STANDBY) {
+ /* Disable interrupts */
+ ret = av8100_disable_interrupt();
+ if (ret) {
+ dev_err(&dev->dev,
+ "%s:av8100_disable_interrupt failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = av8100_powerdown();
+ if (ret) {
+ dev_err(&dev->dev,
+ "av8100_powerdown failed\n");
+ return ret;
+ }
+
+ msleep(10);
+ }
+ }
+
+ /* Set to powerup with interrupts disabled */
+ status = av8100_status_get();
+ if (status.av8100_state < AV8100_OPMODE_STANDBY) {
+ ret = av8100_powerup();
+ if (ret) {
+ dev_err(&dev->dev, "av8100_powerup failed\n");
+ return ret;
+ }
+ }
+
+ if (status.av8100_state <= AV8100_OPMODE_IDLE) {
+ ret = av8100_download_firmware(I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "av8100_download_firmware failed\n");
+ return ret;
+ }
+ }
+
+ if (av8100_disable_interrupt())
+ return -EFAULT;
+
+ /*
+ * Don't look at dev->port->hdmi_sdtv_switch; it states only which
+ * one should be started, not which one is currently working
+ */
+ if (av8100_conf_get(AV8100_COMMAND_HDMI, &av8100_config))
+ return -EFAULT;
+ if (av8100_config.hdmi_format.hdmi_mode == AV8100_HDMI_ON) {
+ /* Set HDMI mode to OFF */
+ av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_OFF;
+ av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ av8100_config.hdmi_format.hdmi_format = AV8100_HDMI;
+ if (av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config))
+ return -EFAULT;
+
+ if (av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL,
+ I2C_INTERFACE))
+ return -EFAULT;
+ }
+ if (av8100_conf_get(AV8100_COMMAND_DENC, &av8100_config))
+ return -EFAULT;
+ if (av8100_config.denc_format.enable) {
+ /* Turn off DENC */
+ av8100_config.denc_format.enable = 0;
+ if (av8100_conf_prep(AV8100_COMMAND_DENC, &av8100_config))
+ return -EFAULT;
+ if (av8100_conf_w(AV8100_COMMAND_DENC, NULL, NULL,
+ I2C_INTERFACE))
+ return -EFAULT;
+ }
+
+ /* Get current av8100 video output format */
+ ret = av8100_conf_get(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_get "
+ "AV8100_COMMAND_VIDEO_OUTPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH)
+ av8100_config.video_output_format.video_output_cea_vesa =
+ dev->video_mode.yres == NATIVE_YRES_SDTV ?
+ AV8100_CEA21_22_576I_PAL_50HZ :
+ AV8100_CEA6_7_NTSC_60HZ;
+ else
+ av8100_config.video_output_format.video_output_cea_vesa =
+ av8100_video_output_format_get(
+ dev->video_mode.xres,
+ dev->video_mode.yres,
+ dev->video_mode.xres +
+ dev->video_mode.hbp + dev->video_mode.hfp,
+ dev->video_mode.yres +
+ dev->video_mode.vbp + dev->video_mode.vfp,
+ dev->video_mode.pixclock,
+ dev->video_mode.interlaced);
+
+	if (av8100_config.video_output_format.video_output_cea_vesa ==
+			AV8100_VIDEO_OUTPUT_CEA_VESA_MAX) {
+		dev_err(&dev->dev, "%s:video output format not found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+ ret = av8100_conf_prep(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_prep "
+ "AV8100_COMMAND_VIDEO_OUTPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Get current av8100 video input format */
+ ret = av8100_conf_get(AV8100_COMMAND_VIDEO_INPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_get "
+ "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Set correct av8100 video input pixel format */
+ switch (dev->port->pixel_format) {
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ default:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB565;
+ break;
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB666;
+ break;
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB666P;
+ break;
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_RGB888;
+ break;
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ av8100_config.video_input_format.input_pixel_format =
+ AV8100_INPUT_PIX_YCBCR422;
+ break;
+ }
+
+ /* Set ui_x4 */
+ av8100_config.video_input_format.ui_x4 = dev->port->phy.dsi.ui;
+
+ /* Set TE_config */
+ switch (dev->port->sync_src) {
+ case MCDE_SYNCSRC_TE0:
+ av8100_config.video_input_format.TE_config = AV8100_TE_IT_LINE;
+ break;
+ case MCDE_SYNCSRC_TE1:
+ av8100_config.video_input_format.TE_config = AV8100_TE_GPIO_IT;
+ break;
+ case MCDE_SYNCSRC_TE_POLLING:
+ av8100_config.video_input_format.TE_config =
+ AV8100_TE_DSI_LANE; /* Only on DSI, no interrupts */
+ break;
+ case MCDE_SYNCSRC_OFF:
+ default:
+ av8100_config.video_input_format.TE_config = AV8100_TE_OFF;
+ break;
+ }
+
+ ret = av8100_conf_prep(AV8100_COMMAND_VIDEO_INPUT_FORMAT,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_prep "
+ "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = av8100_conf_w(AV8100_COMMAND_VIDEO_INPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH) {
+ if (dev->port->pixel_format != MCDE_PORTPIXFMT_DSI_YCBCR422)
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_RGB_TO_DENC;
+ else
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_YUV_TO_DENC;
+ } else if (dev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422) {
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_YUV_TO_RGB;
+ } else {
+ av8100_config.color_transform =
+ AV8100_COLOR_TRANSFORM_INDENTITY;
+ }
+
+ ret = av8100_conf_prep(
+ AV8100_COMMAND_COLORSPACECONVERSION,
+ &av8100_config);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_configuration_prepare "
+ "AV8100_COMMAND_COLORSPACECONVERSION failed\n",
+ __func__);
+ return ret;
+ }
+
+ ret = av8100_conf_w(
+ AV8100_COMMAND_COLORSPACECONVERSION,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_COLORSPACECONVERSION failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Set video output format */
+ ret = av8100_conf_w(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "av8100_conf_w failed\n");
+ return ret;
+ }
+
+ /* Set audio input format */
+ ret = av8100_conf_w(AV8100_COMMAND_AUDIO_INPUT_FORMAT,
+ NULL, NULL, I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_AUDIO_INPUT_FORMAT failed\n",
+ __func__);
+ return ret;
+ }
+
+ dev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+ dev->first_update = true;
+
+ return 0;
+}
+
+static u16 rotate_byte_left(u8 c, int nr)
+{
+ return (0xff & (c << nr)) | (0xff & (c >> (8 - nr)));
+}
+
+static u16 map_yv(u8 in)
+{
+ return rotate_byte_left(in, 3) << 4;
+}
+
+static u16 map_u(u8 in)
+{
+ return rotate_byte_left(in, 5) << 4;
+}
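+
+/*
+ * The helpers above build palette entries that work around the AV8100
+ * V2 YUV422 unpacking error (see hdmi_set_port_pixel_format). Worked
+ * example: rotate_byte_left(0x80, 3) yields 0x04, so map_yv(0x80)
+ * returns 0x04 << 4 = 0x40; the palette appears to use 12-bit entries.
+ */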
+
+static int hdmi_set_pixel_format(
+ struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format)
+{
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ ddev->pixel_format = format;
+
+ return 0;
+}
+
+static int hdmi_set_port_pixel_format(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "%s\n", __func__);
+ mcde_chnl_stop_flow(ddev->chnl_state);
+ ret = mcde_chnl_set_pixel_format(ddev->chnl_state,
+ ddev->port->pixel_format);
+
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s: Failed to set pixel format = %d\n",
+ __func__, ddev->port->pixel_format);
+ return ret;
+ }
+
+ if (ddev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422 &&
+ av8100_ver_get() == 2) {
+ /* The V2 version has an error for unpacking YUV422 */
+		struct mcde_palette_table palette = {
+			.map_col_ch0 = map_yv,
+			.map_col_ch1 = map_u,
+			.map_col_ch2 = map_yv,
+		};
+ ret = mcde_chnl_set_palette(ddev->chnl_state, &palette);
+ } else {
+ ret = mcde_chnl_set_palette(ddev->chnl_state, NULL);
+ }
+
+	return ret;
+}
+
+static int hdmi_apply_config(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ if (!ddev->update_flags)
+ return 0;
+
+ ret = mcde_chnl_apply(ddev->chnl_state);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to apply to channel\n",
+ __func__);
+ return ret;
+ }
+ ddev->update_flags = 0;
+
+ return 0;
+}
+
+static int hdmi_on_first_update(struct mcde_display_device *dev)
+{
+ int ret;
+ union av8100_configuration av8100_config;
+ u8 *infofr_data;
+ int infofr_crc;
+ int cnt;
+
+ dev->first_update = false;
+
+ /*
+ * Prepare HDMI configuration
+ * Avoid simultaneous output of DENC and HDMI/DVI.
+ * Only one of them should be enabled.
+ * Note HDMI/DVI and DENC are always turned off in set_video_mode.
+ */
+ switch (dev->port->hdmi_sdtv_switch) {
+ case SDTV_SWITCH:
+ if (av8100_conf_get(AV8100_COMMAND_DENC, &av8100_config))
+ return -EFAULT;
+ av8100_config.denc_format.enable = 1;
+ if (dev->video_mode.yres == NATIVE_YRES_SDTV) {
+ av8100_config.denc_format.standard_selection =
+ AV8100_PAL_BDGHI;
+ av8100_config.denc_format.cvbs_video_format =
+ AV8100_CVBS_625;
+ } else {
+ av8100_config.denc_format.standard_selection =
+ AV8100_NTSC_M;
+ av8100_config.denc_format.cvbs_video_format =
+ AV8100_CVBS_525;
+ }
+ ret = av8100_conf_prep(AV8100_COMMAND_DENC, &av8100_config);
+ break;
+ case DVI_SWITCH:
+ av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ av8100_config.hdmi_format.hdmi_format = AV8100_DVI;
+ av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ ret = av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config);
+ break;
+ case HDMI_SWITCH:
+ default:
+ av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_ON;
+ av8100_config.hdmi_format.hdmi_format = AV8100_HDMI;
+ av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0;
+ ret = av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config);
+ break;
+ }
+
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_prep "
+ "AV8100_COMMAND_HDMI/DENC failed\n", __func__);
+ return ret;
+ }
+
+ /* Enable interrupts */
+ ret = av8100_enable_interrupt();
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_enable_interrupt failed\n",
+ __func__);
+ return ret;
+ }
+
+ if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH)
+ ret = av8100_conf_w(AV8100_COMMAND_DENC, NULL, NULL,
+ I2C_INTERFACE);
+ else
+ ret = av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL,
+ I2C_INTERFACE);
+ if (ret) {
+ dev_err(&dev->dev, "%s:av8100_conf_w "
+ "AV8100_COMMAND_HDMI/DENC failed\n", __func__);
+ return ret;
+ }
+
+ /* AVI Infoframe only if HDMI */
+ if (dev->port->hdmi_sdtv_switch != HDMI_SWITCH)
+ goto hdmi_on_first_update_end;
+
+ /* Create AVI Infoframe */
+ av8100_config.infoframes_format.type = AVI_INFOFRAME_TYPE;
+ av8100_config.infoframes_format.version = AVI_INFOFRAME_VERSION;
+ av8100_config.infoframes_format.length = AVI_INFOFRAME_DATA_SIZE;
+
+ /* AVI Infoframe data */
+ infofr_data = &av8100_config.infoframes_format.data[0];
+ memset(infofr_data, 0, AVI_INFOFRAME_DATA_SIZE);
+ infofr_data[0] = AVI_INFOFRAME_DB1;
+ infofr_data[1] = AVI_INFOFRAME_DB2;
+ infofr_data[3] = ceanr_get(dev);
+
+ /* Calculate AVI Infoframe checksum */
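+	/*
+	 * Per CEA-861, the checksum byte is chosen so that the byte sum
+	 * of type, version, length, payload and checksum is 0 modulo 256.
+	 */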
+ infofr_crc = av8100_config.infoframes_format.type +
+ av8100_config.infoframes_format.version +
+ av8100_config.infoframes_format.length;
+ for (cnt = 0; cnt < AVI_INFOFRAME_DATA_SIZE; cnt++)
+ infofr_crc += infofr_data[cnt];
+ infofr_crc &= 0xFF;
+ av8100_config.infoframes_format.crc = 0x100 - infofr_crc;
+
+ /* Send AVI Infoframe */
+ if (av8100_conf_prep(AV8100_COMMAND_INFOFRAMES,
+ &av8100_config) != 0) {
+ dev_err(&dev->dev, "av8100_conf_prep FAIL\n");
+ return -EINVAL;
+ }
+
+ if (av8100_conf_w(AV8100_COMMAND_INFOFRAMES,
+ NULL, NULL, I2C_INTERFACE) != 0) {
+ dev_err(&dev->dev, "av8100_conf_w FAIL\n");
+ return -EINVAL;
+ }
+
+hdmi_on_first_update_end:
+ return ret;
+}
+
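+/*
+ * Power mode state machine: OFF <-> STANDBY <-> ON. Transitions are
+ * walked one step at a time, so a request for ON while OFF first passes
+ * through STANDBY (enabling platform power and, for SDTV, the CVBS
+ * regulator) before the channel power mode is applied.
+ */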
+static int hdmi_set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev);
+ int ret = 0;
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * the regulator for analog TV out is only enabled here,
+ * this means that one needs to switch to the OFF state
+ * to be able to switch from HDMI to CVBS.
+ */
+ if (ddev->port->hdmi_sdtv_switch == SDTV_SWITCH) {
+ ret = regulator_enable(driver_data->cvbs_regulator);
+ if (ret)
+ return ret;
+ driver_data->cvbs_regulator_enabled = true;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+
+ hdmi_set_port_pixel_format(ddev);
+ }
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ goto set_power_and_exit;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ memset(&(ddev->video_mode), 0, sizeof(struct mcde_video_mode));
+ ret = av8100_powerscan();
+ if (ret)
+ dev_err(&ddev->dev, "%s:av8100_powerscan failed\n"
+ , __func__);
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ if (ret)
+ return ret;
+ }
+ if (driver_data->cvbs_regulator_enabled) {
+ ret = regulator_disable(driver_data->cvbs_regulator);
+ if (ret)
+ return ret;
+ driver_data->cvbs_regulator_enabled = false;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+set_power_and_exit:
+ mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+
+ return ret;
+}
+
+static int hdmi_set_rotation(struct mcde_display_device *ddev,
+ enum mcde_display_rotation rotation)
+{
+ /* Not possible to rotate HDMI */
+ return 0;
+}
+
+static int __devinit hdmi_probe(struct mcde_display_device *dev)
+{
+ int ret = 0;
+ struct mcde_port *port;
+ struct display_driver_data *driver_data;
+ struct mcde_display_hdmi_platform_data *pdata =
+ dev->dev.platform_data;
+
+ if (pdata == NULL) {
+ dev_err(&dev->dev, "%s:Platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dev->port->type != MCDE_PORTTYPE_DSI) {
+ dev_err(&dev->dev, "%s:Invalid port type %d\n",
+ __func__, dev->port->type);
+ return -EINVAL;
+ }
+
+ driver_data = (struct display_driver_data *)
+ kzalloc(sizeof(struct display_driver_data), GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(&dev->dev, "Failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+	/* Use DSI continuous clock mode on chip versions after AV8100_CHIPVER_1 */
+ if (av8100_ver_get() > AV8100_CHIPVER_1)
+ dev->port->phy.dsi.clk_cont = true;
+
+ dev->on_first_update = hdmi_on_first_update;
+ dev->try_video_mode = hdmi_try_video_mode;
+ dev->set_video_mode = hdmi_set_video_mode;
+ dev->apply_config = hdmi_apply_config;
+ dev->set_pixel_format = hdmi_set_pixel_format;
+ dev->set_power_mode = hdmi_set_power_mode;
+ dev->set_rotation = hdmi_set_rotation;
+
+ port = dev->port;
+
+ port->phy.dsi.host_eot_gen = true;
+ port->phy.dsi.num_data_lanes = 2;
+ port->phy.dsi.hs_freq = DSI_HS_FREQ_HZ;
+ port->phy.dsi.lp_freq = DSI_LP_FREQ_HZ;
+
+ /* Create sysfs files */
+ if (device_create_file(&dev->dev, &dev_attr_hdmisdtvswitch))
+ dev_info(&dev->dev,
+ "Unable to create hdmisdtvswitch attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_input_pixel_format))
+ dev_info(&dev->dev,
+ "Unable to create input_pixel_format attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_disponoff))
+ dev_info(&dev->dev,
+ "Unable to create disponoff attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_vesacea))
+ dev_info(&dev->dev,
+			"Unable to create vesacea attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_timing))
+ dev_info(&dev->dev,
+ "Unable to create timing attr\n");
+ if (device_create_file(&dev->dev, &dev_attr_stayalive))
+ dev_info(&dev->dev,
+ "Unable to create stayalive attr\n");
+
+ if (pdata->cvbs_regulator_id) {
+ driver_data->cvbs_regulator = regulator_get(&dev->dev,
+ pdata->cvbs_regulator_id);
+ if (IS_ERR(driver_data->cvbs_regulator)) {
+ ret = PTR_ERR(driver_data->cvbs_regulator);
+ dev_warn(&dev->dev, "%s:Failed to get regulator %s\n",
+ __func__, pdata->cvbs_regulator_id);
+ driver_data->cvbs_regulator = NULL;
+ goto av_regulator_get_failed;
+ }
+ }
+
+ dev_set_drvdata(&dev->dev, driver_data);
+ dev_info(&dev->dev, "HDMI display probed\n");
+
+ return 0;
+
+av_regulator_get_failed:
+ kfree(driver_data);
+ return ret;
+}
+
+static int __devexit hdmi_remove(struct mcde_display_device *dev)
+{
+ struct display_driver_data *driver_data = dev_get_drvdata(&dev->dev);
+ struct mcde_display_hdmi_platform_data *pdata =
+ dev->dev.platform_data;
+
+ /* Remove sysfs files */
+ device_remove_file(&dev->dev, &dev_attr_input_pixel_format);
+ device_remove_file(&dev->dev, &dev_attr_hdmisdtvswitch);
+ device_remove_file(&dev->dev, &dev_attr_disponoff);
+ device_remove_file(&dev->dev, &dev_attr_vesacea);
+ device_remove_file(&dev->dev, &dev_attr_timing);
+ device_remove_file(&dev->dev, &dev_attr_stayalive);
+
+ dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+
+ if (driver_data->cvbs_regulator)
+ regulator_put(driver_data->cvbs_regulator);
+ kfree(driver_data);
+ if (pdata->hdmi_platform_enable) {
+ if (pdata->regulator)
+ regulator_put(pdata->regulator);
+ if (pdata->reset_gpio) {
+ gpio_direction_input(pdata->reset_gpio);
+ gpio_free(pdata->reset_gpio);
+ }
+ }
+
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int hdmi_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ if (ddev->chnl_state == NULL)
+ return 0;
+
+	/* set_power_mode will handle the call to platform_enable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to resume display\n"
+ , __func__);
+
+ return ret;
+}
+
+static int hdmi_suspend(struct mcde_display_device *ddev, pm_message_t state)
+{
+ int ret;
+
+ if (ddev->chnl_state == NULL)
+ return 0;
+
+	/* set_power_mode will handle the call to platform_disable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to suspend display\n"
+ , __func__);
+
+ return ret;
+}
+#endif
+
+static struct mcde_display_driver hdmi_driver = {
+ .probe = hdmi_probe,
+ .remove = hdmi_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = hdmi_suspend,
+ .resume = hdmi_resume,
+#else
+ .suspend = NULL,
+ .resume = NULL,
+#endif
+ .driver = {
+ .name = "av8100_hdmi",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_hdmi_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&hdmi_driver);
+
+}
+late_initcall(mcde_display_hdmi_init);
+
+static void __exit mcde_display_hdmi_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&hdmi_driver);
+}
+module_exit(mcde_display_hdmi_exit);
+
+MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson HDMI display driver");
diff --git a/drivers/video/mcde/display-fictive.c b/drivers/video/mcde/display-fictive.c
new file mode 100644
index 00000000000..c7ea1429b9f
--- /dev/null
+++ b/drivers/video/mcde/display-fictive.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * ST-Ericsson MCDE fictive display driver
+ *
+ * Author: Per Persson <per.xb.persson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+
+#include <video/mcde_display.h>
+
+static int __devinit fictive_probe(struct mcde_display_device *dev)
+{
+	dev->platform_enable = NULL;
+	dev->platform_disable = NULL;
+	dev->set_power_mode = NULL;
+
+ dev_info(&dev->dev, "Fictive display probed\n");
+
+ return 0;
+}
+
+static int __devexit fictive_remove(struct mcde_display_device *dev)
+{
+ return 0;
+}
+
+static struct mcde_display_driver fictive_driver = {
+ .probe = fictive_probe,
+ .remove = fictive_remove,
+ .driver = {
+ .name = "mcde_disp_fictive",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_fictive_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&fictive_driver);
+}
+module_init(mcde_display_fictive_init);
+
+static void __exit mcde_display_fictive_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&fictive_driver);
+}
+module_exit(mcde_display_fictive_exit);
+
+MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE fictive display driver");
diff --git a/drivers/video/mcde/display-generic_dsi.c b/drivers/video/mcde/display-generic_dsi.c
new file mode 100644
index 00000000000..dfbaa4a8765
--- /dev/null
+++ b/drivers/video/mcde/display-generic_dsi.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE generic DCS display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_display-generic_dsi.h>
+
+static int generic_platform_enable(struct mcde_display_device *dev)
+{
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ dev_dbg(&dev->dev, "%s: Reset & power on generic display\n", __func__);
+
+ if (pdata->regulator) {
+ if (regulator_enable(pdata->regulator) < 0) {
+ dev_err(&dev->dev, "%s:Failed to enable regulator\n"
+ , __func__);
+ return -EINVAL;
+ }
+ }
+ if (pdata->reset_gpio)
+ gpio_set_value_cansleep(pdata->reset_gpio, pdata->reset_high);
+ mdelay(pdata->reset_delay);
+ if (pdata->reset_gpio)
+ gpio_set_value_cansleep(pdata->reset_gpio, !pdata->reset_high);
+
+ return 0;
+}
+
+static int generic_platform_disable(struct mcde_display_device *dev)
+{
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ dev_dbg(&dev->dev, "%s:Reset & power off generic display\n", __func__);
+
+ if (pdata->regulator) {
+ if (regulator_disable(pdata->regulator) < 0) {
+ dev_err(&dev->dev, "%s:Failed to disable regulator\n"
+ , __func__);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int generic_set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+ struct mcde_display_generic_platform_data *pdata =
+ ddev->dev.platform_data;
+
+ dev_dbg(&ddev->dev, "%s:Set Power mode\n", __func__);
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ return ret;
+ }
+
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_EXIT_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ msleep(pdata->sleep_out_delay);
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_ON, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ goto set_power_and_exit;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_OFF, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_ENTER_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+	/* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ if (ret)
+ return ret;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+set_power_and_exit:
+ mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+
+ return ret;
+}
+
+static int __devinit generic_probe(struct mcde_display_device *dev)
+{
+ int ret = 0;
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ if (pdata == NULL) {
+ dev_err(&dev->dev, "%s:Platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dev->port->type != MCDE_PORTTYPE_DSI) {
+ dev_err(&dev->dev,
+ "%s:Invalid port type %d\n",
+ __func__, dev->port->type);
+ return -EINVAL;
+ }
+
+ if (!dev->platform_enable && !dev->platform_disable) {
+ pdata->generic_platform_enable = true;
+ if (pdata->reset_gpio) {
+ ret = gpio_request(pdata->reset_gpio, NULL);
+ if (ret) {
+ dev_warn(&dev->dev,
+ "%s:Failed to request gpio %d\n",
+ __func__, pdata->reset_gpio);
+ goto gpio_request_failed;
+ }
+ gpio_direction_output(pdata->reset_gpio,
+ !pdata->reset_high);
+ }
+ if (pdata->regulator_id) {
+ pdata->regulator = regulator_get(&dev->dev,
+ pdata->regulator_id);
+ if (IS_ERR(pdata->regulator)) {
+ ret = PTR_ERR(pdata->regulator);
+ dev_warn(&dev->dev,
+ "%s:Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_id);
+ pdata->regulator = NULL;
+ goto regulator_get_failed;
+ }
+
+ if (regulator_set_voltage(pdata->regulator,
+ pdata->min_supply_voltage,
+ pdata->max_supply_voltage) < 0) {
+ int volt;
+
+ dev_warn(&dev->dev,
+ "%s:Failed to set voltage '%s'\n",
+ __func__, pdata->regulator_id);
+ volt = regulator_get_voltage(pdata->regulator);
+ dev_warn(&dev->dev,
+ "Voltage:%d\n", volt);
+ }
+
+			/*
+			 * When u-boot has shown a startup screen it has also
+			 * turned on the display power, but the regulator
+			 * framework does not know about that. In that case
+			 * the display driver has to enable the regulator
+			 * itself to keep the reference counts balanced.
+			 */
+ if (dev->power_mode == MCDE_DISPLAY_PM_STANDBY) {
+ ret = regulator_enable(pdata->regulator);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "%s:Failed to enable regulator\n"
+ , __func__);
+ goto regulator_enable_failed;
+ }
+ }
+ }
+ }
+
+	dev->platform_enable = generic_platform_enable;
+	dev->platform_disable = generic_platform_disable;
+	dev->set_power_mode = generic_set_power_mode;
+
+ dev_info(&dev->dev, "Generic display probed\n");
+
+ goto out;
+regulator_enable_failed:
+regulator_get_failed:
+ if (pdata->generic_platform_enable && pdata->reset_gpio)
+ gpio_free(pdata->reset_gpio);
+gpio_request_failed:
+out:
+ return ret;
+}
+
+static int __devexit generic_remove(struct mcde_display_device *dev)
+{
+ struct mcde_display_generic_platform_data *pdata =
+ dev->dev.platform_data;
+
+ dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+
+ if (!pdata->generic_platform_enable)
+ return 0;
+
+ if (pdata->regulator)
+ regulator_put(pdata->regulator);
+ if (pdata->reset_gpio) {
+ gpio_direction_input(pdata->reset_gpio);
+ gpio_free(pdata->reset_gpio);
+ }
+
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int generic_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+
+	/* set_power_mode will handle the call to platform_enable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to resume display\n"
+ , __func__);
+ return ret;
+}
+
+static int generic_suspend(struct mcde_display_device *ddev, pm_message_t state)
+{
+ int ret;
+
+	/* set_power_mode will handle the call to platform_disable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to suspend display\n"
+ , __func__);
+ return ret;
+}
+#endif
+
+static struct mcde_display_driver generic_driver = {
+ .probe = generic_probe,
+ .remove = generic_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = generic_suspend,
+ .resume = generic_resume,
+#else
+ .suspend = NULL,
+ .resume = NULL,
+#endif
+ .driver = {
+ .name = "mcde_disp_generic",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_generic_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&generic_driver);
+}
+module_init(mcde_display_generic_init);
+
+static void __exit mcde_display_generic_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&generic_driver);
+}
+module_exit(mcde_display_generic_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE generic DCS display driver");
diff --git a/drivers/video/mcde/display-samsung_s6d16d0.c b/drivers/video/mcde/display-samsung_s6d16d0.c
new file mode 100644
index 00000000000..900c3f70aa6
--- /dev/null
+++ b/drivers/video/mcde/display-samsung_s6d16d0.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE Samsung S6D16D0 display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+
+#include <video/mcde_display.h>
+
+#define RESET_DURATION_US 10
+#define RESET_DELAY_MS 120
+#define SLEEP_OUT_DELAY_MS 120
+#define IO_REGU "vdd1"
+#define IO_REGU_MIN 1650000
+#define IO_REGU_MAX 3300000
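+
+/*
+ * The 120 ms sleep-out delay matches the interval commonly required
+ * after exit_sleep_mode before further sleep-mode commands; the reset
+ * timing values are assumed to come from the S6D16D0 datasheet.
+ */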
+
+#define DSI_HS_FREQ_HZ 420160000
+#define DSI_LP_FREQ_HZ 19200000
+
+struct device_info {
+ int reset_gpio;
+ struct mcde_port port;
+ struct regulator *regulator;
+};
+
+static inline struct device_info *get_drvdata(struct mcde_display_device *ddev)
+{
+ return (struct device_info *)dev_get_drvdata(&ddev->dev);
+}
+
+static int power_on(struct mcde_display_device *ddev)
+{
+ struct device_info *di = get_drvdata(ddev);
+
+ dev_dbg(&ddev->dev, "Reset & power on s6d16d0 display\n");
+
+ regulator_enable(di->regulator);
+ gpio_set_value_cansleep(di->reset_gpio, 0);
+ udelay(RESET_DURATION_US);
+ gpio_set_value_cansleep(di->reset_gpio, 1);
+ msleep(RESET_DELAY_MS);
+
+ return 0;
+}
+
+static int power_off(struct mcde_display_device *ddev)
+{
+ struct device_info *di = get_drvdata(ddev);
+
+ dev_dbg(&ddev->dev, "Power off s6d16d0 display\n");
+
+ regulator_disable(di->regulator);
+
+ return 0;
+}
+
+static int display_on(struct mcde_display_device *ddev)
+{
+ int ret;
+ u8 val = 0;
+
+ dev_dbg(&ddev->dev, "Display on s6d16d0\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_TEAR_ON, &val, 1);
+ if (ret)
+ dev_warn(&ddev->dev,
+ "%s:Failed to enable synchronized update\n", __func__);
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_EXIT_SLEEP_MODE,
+ NULL, 0);
+ if (ret)
+ return ret;
+ msleep(SLEEP_OUT_DELAY_MS);
+ return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_ON,
+ NULL, 0);
+}
+
+static int display_off(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "Display off s6d16d0\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_OFF,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_ENTER_SLEEP_MODE,
+ NULL, 0);
+}
+
+static int set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+
+ dev_dbg(&ddev->dev, "Set power mode %d\n", power_mode);
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ ret = power_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ret = display_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+
+ ret = display_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ ret = power_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ return mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+}
+
+static int __devinit samsung_s6d16d0_probe(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ struct mcde_display_dsi_platform_data *pdata = ddev->dev.platform_data;
+ struct device_info *di;
+
+ if (pdata == NULL || !pdata->reset_gpio) {
+ dev_err(&ddev->dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+ di->reset_gpio = pdata->reset_gpio;
+ di->port.link = pdata->link;
+ di->port.type = MCDE_PORTTYPE_DSI;
+ di->port.mode = MCDE_PORTMODE_CMD;
+ di->port.pixel_format = MCDE_PORTPIXFMT_DSI_24BPP;
+ di->port.sync_src = ddev->port->sync_src;
+ di->port.frame_trig = ddev->port->frame_trig;
+ di->port.phy.dsi.num_data_lanes = 2;
+ di->port.phy.dsi.host_eot_gen = true;
+ /* TODO: Move UI to mcde_hw.c when clk_get_rate(dsi) is done */
+ di->port.phy.dsi.ui = 9;
+ di->port.phy.dsi.hs_freq = DSI_HS_FREQ_HZ;
+ di->port.phy.dsi.lp_freq = DSI_LP_FREQ_HZ;
+
+ ret = gpio_request(di->reset_gpio, NULL);
+ if (ret)
+ goto gpio_request_failed;
+ gpio_direction_output(di->reset_gpio, 1);
+ di->regulator = regulator_get(&ddev->dev, IO_REGU);
+ if (IS_ERR(di->regulator)) {
+ di->regulator = NULL;
+ goto regulator_get_failed;
+ }
+ ret = regulator_set_voltage(di->regulator, IO_REGU_MIN, IO_REGU_MAX);
+ if (WARN_ON(ret))
+ goto regulator_voltage_failed;
+
+ /* Get in sync with u-boot */
+ if (ddev->power_mode != MCDE_DISPLAY_PM_OFF)
+ (void)regulator_enable(di->regulator);
+
+ ddev->set_power_mode = set_power_mode;
+ ddev->port = &di->port;
+ ddev->native_x_res = 864;
+ ddev->native_y_res = 480;
+ dev_set_drvdata(&ddev->dev, di);
+
+ dev_info(&ddev->dev, "Samsung s6d16d0 display probed\n");
+
+ return 0;
+regulator_voltage_failed:
+ regulator_put(di->regulator);
+regulator_get_failed:
+ gpio_free(di->reset_gpio);
+gpio_request_failed:
+ kfree(di);
+ return ret;
+}
+
+static struct mcde_display_driver samsung_s6d16d0_driver = {
+ .probe = samsung_s6d16d0_probe,
+ .driver = {
+ .name = "samsung_s6d16d0",
+ },
+};
+
+static int __init samsung_s6d16d0_init(void)
+{
+ return mcde_display_driver_register(&samsung_s6d16d0_driver);
+}
+module_init(samsung_s6d16d0_init);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE Samsung S6D16D0 display driver");
diff --git a/drivers/video/mcde/display-sony_acx424akp_dsi.c b/drivers/video/mcde/display-sony_acx424akp_dsi.c
new file mode 100644
index 00000000000..867bbb37375
--- /dev/null
+++ b/drivers/video/mcde/display-sony_acx424akp_dsi.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE Sony acx424akp DCS display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <linux/regulator/consumer.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_display-sony_acx424akp_dsi.h>
+
+#define RESET_DELAY_MS 11
+#define RESET_LOW_DELAY_US 20
+#define SLEEP_OUT_DELAY_MS 140
+#define SLEEP_IN_DELAY_MS 85 /* Assume 60 Hz, 5 frames */
+#define IO_REGU "vddi"
+#define IO_REGU_MIN 1600000
+#define IO_REGU_MAX 3300000
+
+#define DSI_HS_FREQ_HZ 420160000
+#define DSI_LP_FREQ_HZ 19200000
+
+struct device_info {
+ int reset_gpio;
+ struct mcde_port port;
+ struct regulator *regulator;
+};
+
+static inline struct device_info *get_drvdata(struct mcde_display_device *ddev)
+{
+ return (struct device_info *)dev_get_drvdata(&ddev->dev);
+}
+
+static int display_read_deviceid(struct mcde_display_device *dev, u16 *id)
+{
+ struct mcde_chnl_state *chnl;
+
+ u8 id1, id2, id3;
+ int len = 1;
+ int ret = 0;
+ int readret = 0;
+
+ dev_dbg(&dev->dev, "%s: Read device id of the display\n", __func__);
+
+ /* Acquire MCDE resources */
+ chnl = mcde_chnl_get(dev->chnl_id, dev->fifo, dev->port);
+ if (IS_ERR(chnl)) {
+ ret = PTR_ERR(chnl);
+ dev_warn(&dev->dev, "Failed to acquire MCDE channel\n");
+ goto out;
+ }
+
+	/* plug-n-play: use registers DAh, DBh and DCh to detect display */
+ readret = mcde_dsi_dcs_read(chnl, 0xDA, (u32 *)&id1, &len);
+ if (!readret)
+ readret = mcde_dsi_dcs_read(chnl, 0xDB, (u32 *)&id2, &len);
+ if (!readret)
+ readret = mcde_dsi_dcs_read(chnl, 0xDC, (u32 *)&id3, &len);
+
+	if (readret) {
+		dev_info(&dev->dev,
+			"mcde_dsi_dcs_read failed to read display ID\n");
+		ret = readret;
+		goto read_fail;
+	}
+
+ *id = (id3 << 8) | id2;
+read_fail:
+ /* close MCDE channel */
+ mcde_chnl_put(chnl);
+out:
+	return ret;
+}
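+
+/*
+ * Illustrative note (not from the panel datasheet): DAh/DBh/DCh are
+ * widely used panel ID read commands, often holding manufacturer ID,
+ * driver version and driver ID. Only DBh and DCh end up in the composed
+ * value, so e.g. id2 = 0x24, id3 = 0x60 would yield
+ * *id = (0x60 << 8) | 0x24 = 0x6024, which probe() then matches against
+ * the DISPLAY_SONY_ACX424AKP* constants.
+ */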
+
+static int power_on(struct mcde_display_device *dev)
+{
+ struct device_info *di = get_drvdata(dev);
+
+ dev_dbg(&dev->dev, "%s: Reset & power on sony display\n", __func__);
+
+ regulator_enable(di->regulator);
+ gpio_set_value_cansleep(di->reset_gpio, 1);
+ msleep(RESET_DELAY_MS);
+ gpio_set_value_cansleep(di->reset_gpio, 0);
+ udelay(RESET_LOW_DELAY_US);
+ gpio_set_value_cansleep(di->reset_gpio, 1);
+ msleep(RESET_DELAY_MS);
+
+ return 0;
+}
+
+static int power_off(struct mcde_display_device *dev)
+{
+ struct device_info *di = get_drvdata(dev);
+
+ dev_dbg(&dev->dev, "%s:Reset & power off sony display\n", __func__);
+
+ gpio_set_value_cansleep(di->reset_gpio, 0);
+ msleep(RESET_DELAY_MS);
+ regulator_disable(di->regulator);
+
+ return 0;
+}
+
+static int display_on(struct mcde_display_device *ddev)
+{
+ int ret;
+ u8 val = 0;
+
+ dev_dbg(&ddev->dev, "Display on sony display\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_TEAR_ON, &val, 1);
+ if (ret)
+ dev_warn(&ddev->dev,
+ "%s:Failed to enable synchronized update\n", __func__);
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_EXIT_SLEEP_MODE,
+ NULL, 0);
+ if (ret)
+ return ret;
+ msleep(SLEEP_OUT_DELAY_MS);
+ return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_ON,
+ NULL, 0);
+}
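+
+/*
+ * Note: the sleep-out delay above follows the common DCS pattern of
+ * giving the panel on the order of 120 ms to restart after
+ * DCS_CMD_EXIT_SLEEP_MODE before DCS_CMD_SET_DISPLAY_ON is sent;
+ * SLEEP_OUT_DELAY_MS (140 ms) adds margin. The exact requirement is
+ * panel-specific.
+ */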
+
+static int display_off(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ dev_dbg(&ddev->dev, "Display off sony display\n");
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_OFF,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_ENTER_SLEEP_MODE,
+ NULL, 0);
+ /* Wait for 4 frames or more */
+ msleep(SLEEP_IN_DELAY_MS);
+
+ return ret;
+}
+
+static int sony_acx424akp_set_scan_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+ u8 param[MCDE_MAX_DSI_DIRECT_CMD_WRITE];
+
+	dev_dbg(&ddev->dev, "%s:Set scan mode\n", __func__);
+
+ /* 180 rotation for SONY ACX424AKP display */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY) {
+ param[0] = 0xAA;
+ ret = mcde_dsi_dcs_write(ddev->chnl_state, 0xf3, param, 1);
+ if (ret)
+ return ret;
+
+ param[0] = 0x00;
+ param[1] = 0x00;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+
+ param[0] = 0xC9;
+ param[1] = 0x01;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+
+ param[0] = 0xA2;
+ param[1] = 0x00;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+
+ param[0] = 0xFF;
+ param[1] = 0xAA;
+ ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static int sony_acx424akp_set_power_mode(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+
+ dev_dbg(&ddev->dev, "%s:Set Power mode\n", __func__);
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ ret = power_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+
+ ret = display_on(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ }
+ /* ON -> STANDBY */
+ else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+
+ ret = display_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ /* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ ret = power_off(ddev);
+ if (ret)
+ return ret;
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ return mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+}
+
+static int __devinit sony_acx424akp_probe(struct mcde_display_device *dev)
+{
+ int ret = 0;
+ u16 id = 0;
+ struct device_info *di;
+ struct mcde_port *port;
+ struct mcde_display_sony_acx424akp_platform_data *pdata =
+ dev->dev.platform_data;
+
+ if (pdata == NULL || !pdata->reset_gpio) {
+ dev_err(&dev->dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ port = dev->port;
+ di->reset_gpio = pdata->reset_gpio;
+ di->port.type = MCDE_PORTTYPE_DSI;
+ di->port.mode = MCDE_PORTMODE_CMD;
+ di->port.pixel_format = MCDE_PORTPIXFMT_DSI_24BPP;
+ di->port.sync_src = dev->port->sync_src;
+ di->port.frame_trig = dev->port->frame_trig;
+ di->port.phy.dsi.num_data_lanes = 2;
+ di->port.link = port->link;
+ di->port.phy.dsi.host_eot_gen = true;
+ /* TODO: Move UI to mcde_hw.c when clk_get_rate(dsi) is done */
+ di->port.phy.dsi.ui = 9;
+ di->port.phy.dsi.hs_freq = DSI_HS_FREQ_HZ;
+ di->port.phy.dsi.lp_freq = DSI_LP_FREQ_HZ;
+
+ ret = gpio_request(di->reset_gpio, NULL);
+ if (WARN_ON(ret))
+ goto gpio_request_failed;
+
+ gpio_direction_output(di->reset_gpio, 1);
+ di->regulator = regulator_get(&dev->dev, IO_REGU);
+ if (IS_ERR(di->regulator)) {
+ ret = PTR_ERR(di->regulator);
+ di->regulator = NULL;
+ goto regulator_get_failed;
+ }
+ ret = regulator_set_voltage(di->regulator, IO_REGU_MIN, IO_REGU_MAX);
+ if (WARN_ON(ret))
+ goto regulator_voltage_failed;
+
+ dev->set_power_mode = sony_acx424akp_set_power_mode;
+
+ dev->port = &di->port;
+ dev->native_x_res = 480;
+ dev->native_y_res = 854;
+ dev_set_drvdata(&dev->dev, di);
+
+	/*
+	 * If u-boot has displayed a startup screen, it has already
+	 * turned on the display power, but the regulator framework
+	 * does not know about that. In that case the display driver
+	 * has to enable the regulator for the display itself.
+	 */
+ if (dev->power_mode != MCDE_DISPLAY_PM_OFF) {
+ (void) regulator_enable(di->regulator);
+ } else {
+ power_on(dev);
+ dev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+
+ ret = display_read_deviceid(dev, &id);
+ if (ret)
+ goto read_id_failed;
+
+ switch (id) {
+ case DISPLAY_SONY_ACX424AKP:
+ case DISPLAY_SONY_ACX424AKP_ID2:
+ pdata->disp_panel = id;
+ dev_info(&dev->dev,
+ "Sony ACX424AKP display (ID 0x%.4X) probed\n", id);
+ break;
+	default:
+		pdata->disp_panel = DISPLAY_NONE;
+		dev_info(&dev->dev,
+			"Display not recognized (ID 0x%.4X)\n", id);
+		ret = -ENODEV;
+		goto read_id_failed;
+ }
+
+ return 0;
+
+read_id_failed:
+regulator_voltage_failed:
+ regulator_put(di->regulator);
+regulator_get_failed:
+ gpio_free(di->reset_gpio);
+gpio_request_failed:
+ kfree(di);
+ return ret;
+}
+
+static int __devexit sony_acx424akp_remove(struct mcde_display_device *dev)
+{
+ struct device_info *di = get_drvdata(dev);
+
+ dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+
+ regulator_put(di->regulator);
+ gpio_direction_input(di->reset_gpio);
+ gpio_free(di->reset_gpio);
+
+ kfree(di);
+
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int sony_acx424akp_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+
+	/* set_power_mode will handle the call to platform_enable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to resume display\n"
+ , __func__);
+ return ret;
+}
+
+static int sony_acx424akp_suspend(struct mcde_display_device *ddev,
+ pm_message_t state)
+{
+ int ret;
+
+	/* set_power_mode will handle the call to platform_disable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to suspend display\n"
+ , __func__);
+ return ret;
+}
+#endif
+
+static struct mcde_display_driver sony_acx424akp_driver = {
+ .probe = sony_acx424akp_probe,
+ .remove = sony_acx424akp_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = sony_acx424akp_suspend,
+ .resume = sony_acx424akp_resume,
+#else
+ .suspend = NULL,
+ .resume = NULL,
+#endif
+ .driver = {
+ .name = "mcde_disp_sony_acx424akp",
+ },
+};
+
+/* Module init */
+static int __init mcde_display_sony_acx424akp_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&sony_acx424akp_driver);
+}
+module_init(mcde_display_sony_acx424akp_init);
+
+static void __exit mcde_display_sony_acx424akp_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&sony_acx424akp_driver);
+}
+module_exit(mcde_display_sony_acx424akp_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE Sony ACX424AKP DCS display driver");
diff --git a/drivers/video/mcde/display-vuib500-dpi.c b/drivers/video/mcde/display-vuib500-dpi.c
new file mode 100644
index 00000000000..2bd5b990608
--- /dev/null
+++ b/drivers/video/mcde/display-vuib500-dpi.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE DPI display driver
+ * The VUIB500 is a user interface board that can be attached to an HREF. It
+ * supports the DPI pixel interface and converts this to an analog VGA signal,
+ * which can be connected to a monitor using a DSUB connector. The VUIB board
+ * uses an external power supply of 5V.
+ *
+ * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_display-vuib500-dpi.h>
+
+#define DPI_DISP_TRACE dev_dbg(&ddev->dev, "%s\n", __func__)
+
+static int try_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+static int set_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode);
+
+static int __devinit dpi_display_probe(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ struct mcde_display_dpi_platform_data *pdata = ddev->dev.platform_data;
+ DPI_DISP_TRACE;
+
+ if (pdata == NULL) {
+ dev_err(&ddev->dev, "%s:Platform data missing\n", __func__);
+ ret = -EINVAL;
+ goto no_pdata;
+ }
+
+ if (ddev->port->type != MCDE_PORTTYPE_DPI) {
+ dev_err(&ddev->dev,
+ "%s:Invalid port type %d\n",
+ __func__, ddev->port->type);
+ ret = -EINVAL;
+ goto invalid_port_type;
+ }
+
+ ddev->try_video_mode = try_video_mode;
+ ddev->set_video_mode = set_video_mode;
+ dev_info(&ddev->dev, "DPI display probed\n");
+
+ goto out;
+invalid_port_type:
+no_pdata:
+out:
+ return ret;
+}
+
+static int __devexit dpi_display_remove(struct mcde_display_device *ddev)
+{
+ DPI_DISP_TRACE;
+
+ ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+
+ return 0;
+}
+
+static int dpi_display_resume(struct mcde_display_device *ddev)
+{
+ int ret;
+ DPI_DISP_TRACE;
+
+	/* set_power_mode will handle the call to platform_enable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to resume display\n"
+ , __func__);
+ return ret;
+}
+
+static int dpi_display_suspend(struct mcde_display_device *ddev,
+ pm_message_t state)
+{
+ int ret;
+ DPI_DISP_TRACE;
+
+	/* set_power_mode will handle the call to platform_disable */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+ if (ret < 0)
+ dev_warn(&ddev->dev, "%s:Failed to suspend display\n"
+ , __func__);
+ return ret;
+}
+
+static void print_vmode(struct mcde_video_mode *vmode)
+{
+ pr_debug("resolution: %dx%d\n", vmode->xres, vmode->yres);
+ pr_debug(" pixclock: %d\n", vmode->pixclock);
+ pr_debug(" hbp: %d\n", vmode->hbp);
+ pr_debug(" hfp: %d\n", vmode->hfp);
+ pr_debug(" hsw: %d\n", vmode->hsw);
+ pr_debug(" vbp: %d\n", vmode->vbp);
+ pr_debug(" vfp: %d\n", vmode->vfp);
+ pr_debug(" vsw: %d\n", vmode->vsw);
+ pr_debug("interlaced: %s\n", vmode->interlaced ? "true" : "false");
+}
+
+/* Taken from the programmed value of the LCD clock in PRCMU */
+#define PIX_CLK_FREQ 25000000
+#define VMODE_XRES 640
+#define VMODE_YRES 480
+
+static int try_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ int res = -EINVAL;
+ DPI_DISP_TRACE;
+
+	if (ddev == NULL || video_mode == NULL) {
+		pr_warn("%s:ddev = NULL or video_mode = NULL\n", __func__);
+		return res;
+	}
+
+ if (video_mode->xres == VMODE_XRES && video_mode->yres == VMODE_YRES) {
+ video_mode->hbp = 40;
+ video_mode->hfp = 8;
+ video_mode->hsw = 96;
+ video_mode->vbp = 25;
+ video_mode->vfp = 2;
+ video_mode->vsw = 2;
+ /*
+ * The pixclock setting is not used within MCDE. The clock is
+ * setup elsewhere. But the pixclock value is visible in user
+ * space.
+ */
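+		/*
+		 * Worked example: pixclock is in picoseconds per pixel,
+		 * i.e. 10^12 / f_pix; the 25 MHz LCD clock gives
+		 * 10^12 / 25000000 = 40000 ps. The integer form below
+		 * avoids floating point, which is not allowed in kernel
+		 * code.
+		 */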
+		video_mode->pixclock = 1000000000 / (PIX_CLK_FREQ / 1000);
+ res = 0;
+ } /* TODO: add more supported resolutions here */
+ video_mode->interlaced = false;
+
+ if (res == 0)
+ print_vmode(video_mode);
+ else
+ dev_warn(&ddev->dev,
+ "%s:Failed to find video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+
+ return res;
+}
+
+static int set_video_mode(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ int res;
+ DPI_DISP_TRACE;
+
+	if (ddev == NULL || video_mode == NULL) {
+		pr_warn("%s:ddev = NULL or video_mode = NULL\n", __func__);
+		return -EINVAL;
+	}
+ if (video_mode->xres != VMODE_XRES || video_mode->yres != VMODE_YRES) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n",
+ __func__, video_mode->xres, video_mode->yres);
+ return -EINVAL;
+ }
+ ddev->video_mode = *video_mode;
+ print_vmode(video_mode);
+
+ res = mcde_chnl_set_video_mode(ddev->chnl_state, &ddev->video_mode);
+	if (res < 0) {
+		dev_warn(&ddev->dev, "%s:Failed to set video mode on channel\n",
+			__func__);
+	}
+ /* notify mcde display driver about updated video mode */
+ ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+ return res;
+}
+
+static struct mcde_display_driver dpi_display_driver = {
+ .probe = dpi_display_probe,
+ .remove = dpi_display_remove,
+ .suspend = dpi_display_suspend,
+ .resume = dpi_display_resume,
+ .driver = {
+ .name = "mcde_display_dpi",
+ },
+};
+
+/* Module init */
+static int __init mcde_dpi_display_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ return mcde_display_driver_register(&dpi_display_driver);
+}
+module_init(mcde_dpi_display_init);
+
+static void __exit mcde_dpi_display_exit(void)
+{
+ pr_info("%s\n", __func__);
+
+ mcde_display_driver_unregister(&dpi_display_driver);
+}
+module_exit(mcde_dpi_display_exit);
+
+MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE DPI display driver for VUIB500 display");
diff --git a/drivers/video/mcde/dsilink_regs.h b/drivers/video/mcde/dsilink_regs.h
new file mode 100644
index 00000000000..29fa75a1752
--- /dev/null
+++ b/drivers/video/mcde/dsilink_regs.h
@@ -0,0 +1,2037 @@
+#define DSI_VAL2REG(__reg, __fld, __val) \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
+#define DSI_REG2VAL(__reg, __fld, __val) \
+ (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT)
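+
+/*
+ * Usage sketch (illustrative, not part of the register specification):
+ * the per-field helpers below expand through DSI_VAL2REG to
+ * "(value << SHIFT) & MASK", so register words can be composed and
+ * decoded symbolically, e.g.:
+ *
+ *	u32 val = DSI_MCTL_PLL_CTL_PLL_MULT(0x20) |
+ *		DSI_MCTL_PLL_CTL_PLL_MASTER(1);
+ *	u32 mult = DSI_REG2VAL(DSI_MCTL_PLL_CTL, PLL_MULT, val);
+ *
+ * which yields mult == 0x20. Fields with named values also get an
+ * *_ENUM() helper, e.g. DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_ENUM(VID).
+ */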
+
+#define DSI_MCTL_INTEGRATION_MODE 0x00000000
+#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN_SHIFT 0
+#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN_MASK 0x00000001
+#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_INTEGRATION_MODE, INT_MODE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL 0x00000004
+#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN_SHIFT 0
+#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN_MASK 0x00000001
+#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, LINK_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_SHIFT 1
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_MASK 0x00000002
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_CMD 0
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_VID 1
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_MODE, \
+ DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_##__x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_MODE, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_VID_EN_SHIFT 2
+#define DSI_MCTL_MAIN_DATA_CTL_VID_EN_MASK 0x00000004
+#define DSI_MCTL_MAIN_DATA_CTL_VID_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, VID_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL_SHIFT 3
+#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL_MASK 0x00000008
+#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TVG_SEL, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL_SHIFT 4
+#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL_MASK 0x00000010
+#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TBG_SEL, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN_SHIFT 5
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_TE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN_SHIFT 6
+#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN_MASK 0x00000040
+#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF2_TE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN_SHIFT 7
+#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN_MASK 0x00000080
+#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_READ_EN_SHIFT 8
+#define DSI_MCTL_MAIN_DATA_CTL_READ_EN_MASK 0x00000100
+#define DSI_MCTL_MAIN_DATA_CTL_READ_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, READ_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN_SHIFT 9
+#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN_MASK 0x00000200
+#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, BTA_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC_SHIFT 10
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC_MASK 0x00000400
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_GEN_ECC, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM_SHIFT 11
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM_MASK 0x00000800
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_GEN_CHECKSUM, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN_SHIFT 12
+#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN_MASK 0x00001000
+#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, HOST_EOT_GEN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN_SHIFT 13
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN_MASK 0x00002000
+#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_EOT_GEN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN_SHIFT 14
+#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN_MASK 0x00004000
+#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DLX_REMAP_EN, __x)
+#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN_SHIFT 15
+#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN_MASK 0x00008000
+#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TE_POLLING_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL 0x00000008
+#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN_SHIFT 0
+#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN_MASK 0x00000001
+#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, LANE2_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE_SHIFT 1
+#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE_MASK 0x00000002
+#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS_SHIFT 2
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS_MASK 0x00000004
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN_SHIFT 3
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN_MASK 0x00000008
+#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, CLK_ULPM_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN_SHIFT 4
+#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN_MASK 0x00000010
+#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, DAT1_ULPM_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN_SHIFT 5
+#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, DAT2_ULPM_EN, __x)
+#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_SHIFT 6
+#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_MASK 0x000003C0
+#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, WAIT_BURST_TIME, __x)
+#define DSI_MCTL_PLL_CTL 0x0000000C
+#define DSI_MCTL_PLL_CTL_PLL_MULT_SHIFT 0
+#define DSI_MCTL_PLL_CTL_PLL_MULT_MASK 0x000000FF
+#define DSI_MCTL_PLL_CTL_PLL_MULT(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_MULT, __x)
+#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV_SHIFT 8
+#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV_MASK 0x00003F00
+#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_DIV, __x)
+#define DSI_MCTL_PLL_CTL_PLL_IN_DIV_SHIFT 14
+#define DSI_MCTL_PLL_CTL_PLL_IN_DIV_MASK 0x0001C000
+#define DSI_MCTL_PLL_CTL_PLL_IN_DIV(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_IN_DIV, __x)
+#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2_SHIFT 17
+#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2_MASK 0x00020000
+#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_SEL_DIV2, __x)
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_SHIFT 18
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_MASK 0x00040000
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_INT_PLL 0
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_SYS_PLL 1
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_SEL, \
+ DSI_MCTL_PLL_CTL_PLL_OUT_SEL_##__x)
+#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_SEL, __x)
+#define DSI_MCTL_PLL_CTL_PLL_MASTER_SHIFT 31
+#define DSI_MCTL_PLL_CTL_PLL_MASTER_MASK 0x80000000
+#define DSI_MCTL_PLL_CTL_PLL_MASTER(__x) \
+ DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_MASTER, __x)
+#define DSI_MCTL_LANE_STS 0x00000010
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_SHIFT 0
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_MASK 0x00000003
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_START 0
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_IDLE 1
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_HS 2
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_ULPM 3
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, CLKLANE_STATE, \
+ DSI_MCTL_LANE_STS_CLKLANE_STATE_##__x)
+#define DSI_MCTL_LANE_STS_CLKLANE_STATE(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, CLKLANE_STATE, __x)
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_SHIFT 2
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_MASK 0x0000001C
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_START 0
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_IDLE 1
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_WRITE 2
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_ULPM 3
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_READ 4
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE1_STATE, \
+ DSI_MCTL_LANE_STS_DATLANE1_STATE_##__x)
+#define DSI_MCTL_LANE_STS_DATLANE1_STATE(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE1_STATE, __x)
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_SHIFT 5
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_MASK 0x00000060
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_START 0
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_IDLE 1
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_WRITE 2
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_ULPM 3
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE_ENUM(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE2_STATE, \
+ DSI_MCTL_LANE_STS_DATLANE2_STATE_##__x)
+#define DSI_MCTL_LANE_STS_DATLANE2_STATE(__x) \
+ DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE2_STATE, __x)
+#define DSI_MCTL_DPHY_TIMEOUT 0x00000014
+#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_SHIFT 0
+#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_MASK 0x0000000F
+#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, CLK_DIV, __x)
+#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_SHIFT 4
+#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_MASK 0x0003FFF0
+#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, HSTX_TO_VAL, __x)
+#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_SHIFT 18
+#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_MASK 0xFFFC0000
+#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, LPRX_TO_VAL, __x)
+#define DSI_MCTL_ULPOUT_TIME 0x00000018
+#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_SHIFT 0
+#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_MASK 0x000001FF
+#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME(__x) \
+ DSI_VAL2REG(DSI_MCTL_ULPOUT_TIME, CKLANE_ULPOUT_TIME, __x)
+#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_SHIFT 9
+#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_MASK 0x0003FE00
+#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME(__x) \
+ DSI_VAL2REG(DSI_MCTL_ULPOUT_TIME, DATA_ULPOUT_TIME, __x)
+#define DSI_MCTL_DPHY_STATIC 0x0000001C
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK_SHIFT 0
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK_MASK 0x00000001
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_CLK, __x)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK_SHIFT 1
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK_MASK 0x00000002
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_CLK, __x)
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1_SHIFT 2
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1_MASK 0x00000004
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_DAT1, __x)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1_SHIFT 3
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1_MASK 0x00000008
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_DAT1, __x)
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2_SHIFT 4
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2_MASK 0x00000010
+#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_DAT2, __x)
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2_SHIFT 5
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2_MASK 0x00000020
+#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_DAT2, __x)
+#define DSI_MCTL_DPHY_STATIC_UI_X4_SHIFT 6
+#define DSI_MCTL_DPHY_STATIC_UI_X4_MASK 0x00000FC0
+#define DSI_MCTL_DPHY_STATIC_UI_X4(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, UI_X4, __x)
+#define DSI_MCTL_MAIN_EN 0x00000020
+#define DSI_MCTL_MAIN_EN_PLL_START_SHIFT 0
+#define DSI_MCTL_MAIN_EN_PLL_START_MASK 0x00000001
+#define DSI_MCTL_MAIN_EN_PLL_START(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, PLL_START, __x)
+#define DSI_MCTL_MAIN_EN_CKLANE_EN_SHIFT 3
+#define DSI_MCTL_MAIN_EN_CKLANE_EN_MASK 0x00000008
+#define DSI_MCTL_MAIN_EN_CKLANE_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, CKLANE_EN, __x)
+#define DSI_MCTL_MAIN_EN_DAT1_EN_SHIFT 4
+#define DSI_MCTL_MAIN_EN_DAT1_EN_MASK 0x00000010
+#define DSI_MCTL_MAIN_EN_DAT1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT1_EN, __x)
+#define DSI_MCTL_MAIN_EN_DAT2_EN_SHIFT 5
+#define DSI_MCTL_MAIN_EN_DAT2_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_EN_DAT2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT2_EN, __x)
+#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ_SHIFT 6
+#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ_MASK 0x00000040
+#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, CLKLANE_ULPM_REQ, __x)
+#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ_SHIFT 7
+#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ_MASK 0x00000080
+#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT1_ULPM_REQ, __x)
+#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ_SHIFT 8
+#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ_MASK 0x00000100
+#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT2_ULPM_REQ, __x)
+#define DSI_MCTL_MAIN_EN_IF1_EN_SHIFT 9
+#define DSI_MCTL_MAIN_EN_IF1_EN_MASK 0x00000200
+#define DSI_MCTL_MAIN_EN_IF1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, IF1_EN, __x)
+#define DSI_MCTL_MAIN_EN_IF2_EN_SHIFT 10
+#define DSI_MCTL_MAIN_EN_IF2_EN_MASK 0x00000400
+#define DSI_MCTL_MAIN_EN_IF2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_EN, IF2_EN, __x)
+#define DSI_MCTL_MAIN_STS 0x00000024
+#define DSI_MCTL_MAIN_STS_PLL_LOCK_SHIFT 0
+#define DSI_MCTL_MAIN_STS_PLL_LOCK_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_PLL_LOCK(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, PLL_LOCK, __x)
+#define DSI_MCTL_MAIN_STS_CLKLANE_READY_SHIFT 1
+#define DSI_MCTL_MAIN_STS_CLKLANE_READY_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_CLKLANE_READY(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, CLKLANE_READY, __x)
+#define DSI_MCTL_MAIN_STS_DAT1_READY_SHIFT 2
+#define DSI_MCTL_MAIN_STS_DAT1_READY_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_DAT1_READY(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, DAT1_READY, __x)
+#define DSI_MCTL_MAIN_STS_DAT2_READY_SHIFT 3
+#define DSI_MCTL_MAIN_STS_DAT2_READY_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_DAT2_READY(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, DAT2_READY, __x)
+#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR_SHIFT 4
+#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, HSTX_TO_ERR, __x)
+#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR_SHIFT 5
+#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, LPRX_TO_ERR, __x)
+#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK_SHIFT 6
+#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, CRS_UNTERM_PCK, __x)
+#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK_SHIFT 7
+#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS, VRS_UNTERM_PCK, __x)
+#define DSI_MCTL_DPHY_ERR 0x00000028
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_1_SHIFT 6
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_1_MASK 0x00000040
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_ESC_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_2_SHIFT 7
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_2_MASK 0x00000080
+#define DSI_MCTL_DPHY_ERR_ERR_ESC_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_ESC_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1_SHIFT 8
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1_MASK 0x00000100
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_SYNCESC_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2_SHIFT 9
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2_MASK 0x00000200
+#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_SYNCESC_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1_SHIFT 10
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1_MASK 0x00000400
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONTROL_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2_SHIFT 11
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2_MASK 0x00000800
+#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONTROL_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1_SHIFT 12
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1_MASK 0x00001000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP0_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2_SHIFT 13
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2_MASK 0x00002000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP0_2, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1_SHIFT 14
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1_MASK 0x00004000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP1_1, __x)
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2_SHIFT 15
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2_MASK 0x00008000
+#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP1_2, __x)
+#define DSI_INT_VID_RDDATA 0x00000030
+#define DSI_INT_VID_RDDATA_IF_DATA_SHIFT 0
+#define DSI_INT_VID_RDDATA_IF_DATA_MASK 0x0000FFFF
+#define DSI_INT_VID_RDDATA_IF_DATA(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_DATA, __x)
+#define DSI_INT_VID_RDDATA_IF_VALID_SHIFT 16
+#define DSI_INT_VID_RDDATA_IF_VALID_MASK 0x00010000
+#define DSI_INT_VID_RDDATA_IF_VALID(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_VALID, __x)
+#define DSI_INT_VID_RDDATA_IF_START_SHIFT 17
+#define DSI_INT_VID_RDDATA_IF_START_MASK 0x00020000
+#define DSI_INT_VID_RDDATA_IF_START(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_START, __x)
+#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC_SHIFT 18
+#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC_MASK 0x00040000
+#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC(__x) \
+ DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_FRAME_SYNC, __x)
+#define DSI_INT_VID_GNT 0x00000034
+#define DSI_INT_VID_GNT_IF_STALL_SHIFT 0
+#define DSI_INT_VID_GNT_IF_STALL_MASK 0x00000001
+#define DSI_INT_VID_GNT_IF_STALL(__x) \
+ DSI_VAL2REG(DSI_INT_VID_GNT, IF_STALL, __x)
+#define DSI_INT_CMD_RDDATA 0x00000038
+#define DSI_INT_CMD_RDDATA_IF_DATA_SHIFT 0
+#define DSI_INT_CMD_RDDATA_IF_DATA_MASK 0x0000FFFF
+#define DSI_INT_CMD_RDDATA_IF_DATA(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_DATA, __x)
+#define DSI_INT_CMD_RDDATA_IF_VALID_SHIFT 16
+#define DSI_INT_CMD_RDDATA_IF_VALID_MASK 0x00010000
+#define DSI_INT_CMD_RDDATA_IF_VALID(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_VALID, __x)
+#define DSI_INT_CMD_RDDATA_IF_START_SHIFT 17
+#define DSI_INT_CMD_RDDATA_IF_START_MASK 0x00020000
+#define DSI_INT_CMD_RDDATA_IF_START(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_START, __x)
+#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC_SHIFT 18
+#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC_MASK 0x00040000
+#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_FRAME_SYNC, __x)
+#define DSI_INT_CMD_GNT 0x0000003C
+#define DSI_INT_CMD_GNT_IF_STALL_SHIFT 0
+#define DSI_INT_CMD_GNT_IF_STALL_MASK 0x00000001
+#define DSI_INT_CMD_GNT_IF_STALL(__x) \
+ DSI_VAL2REG(DSI_INT_CMD_GNT, IF_STALL, __x)
+#define DSI_INT_INTERRUPT_CTL 0x00000040
+#define DSI_INT_INTERRUPT_CTL_INT_VAL_SHIFT 0
+#define DSI_INT_INTERRUPT_CTL_INT_VAL_MASK 0x00000001
+#define DSI_INT_INTERRUPT_CTL_INT_VAL(__x) \
+ DSI_VAL2REG(DSI_INT_INTERRUPT_CTL, INT_VAL, __x)
+#define DSI_CMD_MODE_CTL 0x00000050
+#define DSI_CMD_MODE_CTL_IF1_ID_SHIFT 0
+#define DSI_CMD_MODE_CTL_IF1_ID_MASK 0x00000003
+#define DSI_CMD_MODE_CTL_IF1_ID(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF1_ID, __x)
+#define DSI_CMD_MODE_CTL_IF2_ID_SHIFT 2
+#define DSI_CMD_MODE_CTL_IF2_ID_MASK 0x0000000C
+#define DSI_CMD_MODE_CTL_IF2_ID(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF2_ID, __x)
+#define DSI_CMD_MODE_CTL_IF1_LP_EN_SHIFT 4
+#define DSI_CMD_MODE_CTL_IF1_LP_EN_MASK 0x00000010
+#define DSI_CMD_MODE_CTL_IF1_LP_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF1_LP_EN, __x)
+#define DSI_CMD_MODE_CTL_IF2_LP_EN_SHIFT 5
+#define DSI_CMD_MODE_CTL_IF2_LP_EN_MASK 0x00000020
+#define DSI_CMD_MODE_CTL_IF2_LP_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, IF2_LP_EN, __x)
+#define DSI_CMD_MODE_CTL_ARB_MODE_SHIFT 6
+#define DSI_CMD_MODE_CTL_ARB_MODE_MASK 0x00000040
+#define DSI_CMD_MODE_CTL_ARB_MODE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, ARB_MODE, __x)
+#define DSI_CMD_MODE_CTL_ARB_PRI_SHIFT 7
+#define DSI_CMD_MODE_CTL_ARB_PRI_MASK 0x00000080
+#define DSI_CMD_MODE_CTL_ARB_PRI(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, ARB_PRI, __x)
+#define DSI_CMD_MODE_CTL_FIL_VALUE_SHIFT 8
+#define DSI_CMD_MODE_CTL_FIL_VALUE_MASK 0x0000FF00
+#define DSI_CMD_MODE_CTL_FIL_VALUE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, FIL_VALUE, __x)
+#define DSI_CMD_MODE_CTL_TE_TIMEOUT_SHIFT 16
+#define DSI_CMD_MODE_CTL_TE_TIMEOUT_MASK 0x03FF0000
+#define DSI_CMD_MODE_CTL_TE_TIMEOUT(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_CTL, TE_TIMEOUT, __x)
+#define DSI_CMD_MODE_STS 0x00000054
+#define DSI_CMD_MODE_STS_ERR_NO_TE_SHIFT 0
+#define DSI_CMD_MODE_STS_ERR_NO_TE_MASK 0x00000001
+#define DSI_CMD_MODE_STS_ERR_NO_TE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_NO_TE, __x)
+#define DSI_CMD_MODE_STS_ERR_TE_MISS_SHIFT 1
+#define DSI_CMD_MODE_STS_ERR_TE_MISS_MASK 0x00000002
+#define DSI_CMD_MODE_STS_ERR_TE_MISS(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_TE_MISS, __x)
+#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN_SHIFT 2
+#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN_MASK 0x00000004
+#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_SDI1_UNDERRUN, __x)
+#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN_SHIFT 3
+#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN_MASK 0x00000008
+#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_SDI2_UNDERRUN, __x)
+#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD_SHIFT 4
+#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD_MASK 0x00000010
+#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_UNWANTED_RD, __x)
+#define DSI_CMD_MODE_STS_CSM_RUNNING_SHIFT 5
+#define DSI_CMD_MODE_STS_CSM_RUNNING_MASK 0x00000020
+#define DSI_CMD_MODE_STS_CSM_RUNNING(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS, CSM_RUNNING, __x)
+#define DSI_DIRECT_CMD_SEND 0x00000060
+#define DSI_DIRECT_CMD_SEND_START_SHIFT 0
+#define DSI_DIRECT_CMD_SEND_START_MASK 0xFFFFFFFF
+#define DSI_DIRECT_CMD_SEND_START(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_SEND, START, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS 0x00000064
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_SHIFT 0
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_MASK 0x00000007
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_WRITE 0
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_READ 1
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TE_REQ 4
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TRIG_REQ 5
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_BTA_REQ 6
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_NAT, \
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_##__x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_NAT, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT_SHIFT 3
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT_MASK 0x00000008
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_LONGNOTSHORT, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT 8
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_MASK 0x00003F00
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_0 3
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_1 19
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_2 35
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_LONG_WRITE 41
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_0 5
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 21
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_LONG_WRITE 57
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_READ 6
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SET_MAX_PKT_SIZE 55
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_HEAD, \
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_##__x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_HEAD, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT 14
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_MASK 0x0000C000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_ID, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT 16
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_MASK 0x001F0000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_SIZE, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN_SHIFT 21
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN_MASK 0x00200000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_LP_EN, __x)
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL_SHIFT 24
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL_MASK 0x0F000000
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, TRIGGER_VAL, __x)
+#define DSI_DIRECT_CMD_STS 0x00000068
+#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION_SHIFT 0
+#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, CMD_TRANSMISSION, __x)
+#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED_SHIFT 1
+#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, WRITE_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED_SHIFT 2
+#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_SHIFT 3
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, READ_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED_SHIFT 4
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACKNOWLEDGE_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED_SHIFT 5
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACKNOWLEDGE_WITH_ERR_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED_SHIFT 6
+#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_TE_RECEIVED_SHIFT 7
+#define DSI_DIRECT_CMD_STS_TE_RECEIVED_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_TE_RECEIVED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TE_RECEIVED, __x)
+#define DSI_DIRECT_CMD_STS_BTA_COMPLETED_SHIFT 8
+#define DSI_DIRECT_CMD_STS_BTA_COMPLETED_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_BTA_COMPLETED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, BTA_COMPLETED, __x)
+#define DSI_DIRECT_CMD_STS_BTA_FINISHED_SHIFT 9
+#define DSI_DIRECT_CMD_STS_BTA_FINISHED_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_BTA_FINISHED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, BTA_FINISHED, __x)
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR_SHIFT 10
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, READ_COMPLETED_WITH_ERR, __x)
+#define DSI_DIRECT_CMD_STS_TRIGGER_VAL_SHIFT 11
+#define DSI_DIRECT_CMD_STS_TRIGGER_VAL_MASK 0x00007800
+#define DSI_DIRECT_CMD_STS_TRIGGER_VAL(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_VAL, __x)
+#define DSI_DIRECT_CMD_STS_ACK_VAL_SHIFT 16
+#define DSI_DIRECT_CMD_STS_ACK_VAL_MASK 0xFFFF0000
+#define DSI_DIRECT_CMD_STS_ACK_VAL(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACK_VAL, __x)
+#define DSI_DIRECT_CMD_RD_INIT 0x0000006C
+#define DSI_DIRECT_CMD_RD_INIT_RESET_SHIFT 0
+#define DSI_DIRECT_CMD_RD_INIT_RESET_MASK 0xFFFFFFFF
+#define DSI_DIRECT_CMD_RD_INIT_RESET(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_INIT, RESET, __x)
+#define DSI_DIRECT_CMD_WRDAT0 0x00000070
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT0_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT0_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT0(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT0, __x)
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT1_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT1_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT1(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT1, __x)
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT2_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT2_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT2(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT2, __x)
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT3_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT3_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT0_WRDAT3(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT3, __x)
+#define DSI_DIRECT_CMD_WRDAT1 0x00000074
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT4_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT4_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT4(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT4, __x)
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT5_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT5_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT5(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT5, __x)
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT6_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT6_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT6(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT6, __x)
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT7_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT7_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT7(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT7, __x)
+#define DSI_DIRECT_CMD_WRDAT2 0x00000078
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT8_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT8_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT8(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT8, __x)
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT9_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT9_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT9(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT9, __x)
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT10_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT10_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT10(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT10, __x)
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT11_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT11_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT2_WRDAT11(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT11, __x)
+#define DSI_DIRECT_CMD_WRDAT3 0x0000007C
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT12_SHIFT 0
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT12_MASK 0x000000FF
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT12(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT12, __x)
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT13_SHIFT 8
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT13_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT13(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT13, __x)
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT14_SHIFT 16
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT14_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT14(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT14, __x)
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT15_SHIFT 24
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT15_MASK 0xFF000000
+#define DSI_DIRECT_CMD_WRDAT3_WRDAT15(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT15, __x)
+#define DSI_DIRECT_CMD_RDDAT 0x00000080
+#define DSI_DIRECT_CMD_RDDAT_RDDAT0_SHIFT 0
+#define DSI_DIRECT_CMD_RDDAT_RDDAT0_MASK 0x000000FF
+#define DSI_DIRECT_CMD_RDDAT_RDDAT0(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT0, __x)
+#define DSI_DIRECT_CMD_RDDAT_RDDAT1_SHIFT 8
+#define DSI_DIRECT_CMD_RDDAT_RDDAT1_MASK 0x0000FF00
+#define DSI_DIRECT_CMD_RDDAT_RDDAT1(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT1, __x)
+#define DSI_DIRECT_CMD_RDDAT_RDDAT2_SHIFT 16
+#define DSI_DIRECT_CMD_RDDAT_RDDAT2_MASK 0x00FF0000
+#define DSI_DIRECT_CMD_RDDAT_RDDAT2(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT2, __x)
+#define DSI_DIRECT_CMD_RDDAT_RDDAT3_SHIFT 24
+#define DSI_DIRECT_CMD_RDDAT_RDDAT3_MASK 0xFF000000
+#define DSI_DIRECT_CMD_RDDAT_RDDAT3(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT3, __x)
+#define DSI_DIRECT_CMD_RD_PROPERTY 0x00000084
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_SHIFT 0
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_MASK 0x0000FFFF
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_SIZE, __x)
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID_SHIFT 16
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID_MASK 0x00030000
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_ID, __x)
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC_SHIFT 18
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC_MASK 0x00040000
+#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_DCSNOTGENERIC, __x)
+#define DSI_DIRECT_CMD_RD_STS 0x00000088
+#define DSI_DIRECT_CMD_RD_STS_ERR_FIXED_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_ERR_FIXED_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_ERR_FIXED(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_FIXED, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_UNCORRECTABLE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_CHECKSUM, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_UNDECODABLE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_RECEIVE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_OVERSIZE, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_WRONG_LENGTH, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_MISSING_EOT, __x)
+#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_EOT_WITH_ERR, __x)
+#define DSI_VID_MAIN_CTL 0x00000090
+#define DSI_VID_MAIN_CTL_START_MODE_SHIFT 0
+#define DSI_VID_MAIN_CTL_START_MODE_MASK 0x00000003
+#define DSI_VID_MAIN_CTL_START_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, START_MODE, __x)
+#define DSI_VID_MAIN_CTL_STOP_MODE_SHIFT 2
+#define DSI_VID_MAIN_CTL_STOP_MODE_MASK 0x0000000C
+#define DSI_VID_MAIN_CTL_STOP_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, STOP_MODE, __x)
+#define DSI_VID_MAIN_CTL_VID_ID_SHIFT 4
+#define DSI_VID_MAIN_CTL_VID_ID_MASK 0x00000030
+#define DSI_VID_MAIN_CTL_VID_ID(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_ID, __x)
+#define DSI_VID_MAIN_CTL_HEADER_SHIFT 6
+#define DSI_VID_MAIN_CTL_HEADER_MASK 0x00000FC0
+#define DSI_VID_MAIN_CTL_HEADER(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, HEADER, __x)
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_SHIFT 12
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_MASK 0x00003000
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_16BITS 0
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS 1
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS_LOOSE 2
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_24BITS 3
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_PIXEL_MODE, \
+ DSI_VID_MAIN_CTL_VID_PIXEL_MODE_##__x)
+#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_PIXEL_MODE, __x)
+#define DSI_VID_MAIN_CTL_BURST_MODE_SHIFT 14
+#define DSI_VID_MAIN_CTL_BURST_MODE_MASK 0x00004000
+#define DSI_VID_MAIN_CTL_BURST_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, BURST_MODE, __x)
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE_SHIFT 15
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE_MASK 0x00008000
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, SYNC_PULSE_ACTIVE, __x)
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL_SHIFT 16
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL_MASK 0x00010000
+#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, SYNC_PULSE_HORIZONTAL, __x)
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_SHIFT 17
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_MASK 0x00060000
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_NULL 0
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_BLANKING 1
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_0 2
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_1 3
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKLINE_MODE, \
+ DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_##__x)
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKLINE_MODE, __x)
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_SHIFT 19
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_MASK 0x00180000
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_NULL 0
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_BLANKING 1
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 2
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_1 3
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKEOL_MODE, \
+ DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_##__x)
+#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKEOL_MODE, __x)
+#define DSI_VID_MAIN_CTL_RECOVERY_MODE_SHIFT 21
+#define DSI_VID_MAIN_CTL_RECOVERY_MODE_MASK 0x00600000
+#define DSI_VID_MAIN_CTL_RECOVERY_MODE(__x) \
+ DSI_VAL2REG(DSI_VID_MAIN_CTL, RECOVERY_MODE, __x)
+#define DSI_VID_VSIZE 0x00000094
+#define DSI_VID_VSIZE_VSA_LENGTH_SHIFT 0
+#define DSI_VID_VSIZE_VSA_LENGTH_MASK 0x0000003F
+#define DSI_VID_VSIZE_VSA_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VSA_LENGTH, __x)
+#define DSI_VID_VSIZE_VBP_LENGTH_SHIFT 6
+#define DSI_VID_VSIZE_VBP_LENGTH_MASK 0x00000FC0
+#define DSI_VID_VSIZE_VBP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VBP_LENGTH, __x)
+#define DSI_VID_VSIZE_VFP_LENGTH_SHIFT 12
+#define DSI_VID_VSIZE_VFP_LENGTH_MASK 0x000FF000
+#define DSI_VID_VSIZE_VFP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VFP_LENGTH, __x)
+#define DSI_VID_VSIZE_VACT_LENGTH_SHIFT 20
+#define DSI_VID_VSIZE_VACT_LENGTH_MASK 0x7FF00000
+#define DSI_VID_VSIZE_VACT_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_VSIZE, VACT_LENGTH, __x)
+#define DSI_VID_HSIZE1 0x00000098
+#define DSI_VID_HSIZE1_HSA_LENGTH_SHIFT 0
+#define DSI_VID_HSIZE1_HSA_LENGTH_MASK 0x000003FF
+#define DSI_VID_HSIZE1_HSA_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE1, HSA_LENGTH, __x)
+#define DSI_VID_HSIZE1_HBP_LENGTH_SHIFT 10
+#define DSI_VID_HSIZE1_HBP_LENGTH_MASK 0x000FFC00
+#define DSI_VID_HSIZE1_HBP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE1, HBP_LENGTH, __x)
+#define DSI_VID_HSIZE1_HFP_LENGTH_SHIFT 20
+#define DSI_VID_HSIZE1_HFP_LENGTH_MASK 0x7FF00000
+#define DSI_VID_HSIZE1_HFP_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE1, HFP_LENGTH, __x)
+#define DSI_VID_HSIZE2 0x0000009C
+#define DSI_VID_HSIZE2_RGB_SIZE_SHIFT 0
+#define DSI_VID_HSIZE2_RGB_SIZE_MASK 0x00001FFF
+#define DSI_VID_HSIZE2_RGB_SIZE(__x) \
+ DSI_VAL2REG(DSI_VID_HSIZE2, RGB_SIZE, __x)
+#define DSI_VID_BLKSIZE1 0x000000A0
+#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_SHIFT 0
+#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_MASK 0x00001FFF
+#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK(__x) \
+ DSI_VAL2REG(DSI_VID_BLKSIZE1, BLKLINE_EVENT_PCK, __x)
+#define DSI_VID_BLKSIZE1_BLKEOL_PCK_SHIFT 13
+#define DSI_VID_BLKSIZE1_BLKEOL_PCK_MASK 0x03FFE000
+#define DSI_VID_BLKSIZE1_BLKEOL_PCK(__x) \
+ DSI_VAL2REG(DSI_VID_BLKSIZE1, BLKEOL_PCK, __x)
+#define DSI_VID_BLKSIZE2 0x000000A4
+#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_SHIFT 0
+#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_MASK 0x00001FFF
+#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK(__x) \
+ DSI_VAL2REG(DSI_VID_BLKSIZE2, BLKLINE_PULSE_PCK, __x)
+#define DSI_VID_PCK_TIME 0x000000A8
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT 0
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION_MASK 0x00001FFF
+#define DSI_VID_PCK_TIME_BLKEOL_DURATION(__x) \
+ DSI_VAL2REG(DSI_VID_PCK_TIME, BLKEOL_DURATION, __x)
+#define DSI_VID_DPHY_TIME 0x000000AC
+#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT 0
+#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_MASK 0x00001FFF
+#define DSI_VID_DPHY_TIME_REG_LINE_DURATION(__x) \
+ DSI_VAL2REG(DSI_VID_DPHY_TIME, REG_LINE_DURATION, __x)
+#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT 13
+#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_MASK 0x00FFE000
+#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME(__x) \
+ DSI_VAL2REG(DSI_VID_DPHY_TIME, REG_WAKEUP_TIME, __x)
+#define DSI_VID_ERR_COLOR 0x000000B0
+#define DSI_VID_ERR_COLOR_COL_RED_SHIFT 0
+#define DSI_VID_ERR_COLOR_COL_RED_MASK 0x000000FF
+#define DSI_VID_ERR_COLOR_COL_RED(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_RED, __x)
+#define DSI_VID_ERR_COLOR_COL_GREEN_SHIFT 8
+#define DSI_VID_ERR_COLOR_COL_GREEN_MASK 0x0000FF00
+#define DSI_VID_ERR_COLOR_COL_GREEN(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_GREEN, __x)
+#define DSI_VID_ERR_COLOR_COL_BLUE_SHIFT 16
+#define DSI_VID_ERR_COLOR_COL_BLUE_MASK 0x00FF0000
+#define DSI_VID_ERR_COLOR_COL_BLUE(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_BLUE, __x)
+#define DSI_VID_ERR_COLOR_PAD_VAL_SHIFT 24
+#define DSI_VID_ERR_COLOR_PAD_VAL_MASK 0xFF000000
+#define DSI_VID_ERR_COLOR_PAD_VAL(__x) \
+ DSI_VAL2REG(DSI_VID_ERR_COLOR, PAD_VAL, __x)
+#define DSI_VID_VPOS 0x000000B4
+#define DSI_VID_VPOS_LINE_POS_SHIFT 0
+#define DSI_VID_VPOS_LINE_POS_MASK 0x00000003
+#define DSI_VID_VPOS_LINE_POS(__x) \
+ DSI_VAL2REG(DSI_VID_VPOS, LINE_POS, __x)
+#define DSI_VID_VPOS_LINE_VAL_SHIFT 2
+#define DSI_VID_VPOS_LINE_VAL_MASK 0x00001FFC
+#define DSI_VID_VPOS_LINE_VAL(__x) \
+ DSI_VAL2REG(DSI_VID_VPOS, LINE_VAL, __x)
+#define DSI_VID_HPOS 0x000000B8
+#define DSI_VID_HPOS_HORIZONTAL_POS_SHIFT 0
+#define DSI_VID_HPOS_HORIZONTAL_POS_MASK 0x00000007
+#define DSI_VID_HPOS_HORIZONTAL_POS(__x) \
+ DSI_VAL2REG(DSI_VID_HPOS, HORIZONTAL_POS, __x)
+#define DSI_VID_HPOS_HORIZONTAL_VAL_SHIFT 3
+#define DSI_VID_HPOS_HORIZONTAL_VAL_MASK 0x0000FFF8
+#define DSI_VID_HPOS_HORIZONTAL_VAL(__x) \
+ DSI_VAL2REG(DSI_VID_HPOS, HORIZONTAL_VAL, __x)
+#define DSI_VID_MODE_STS 0x000000BC
+#define DSI_VID_MODE_STS_VSG_RUNNING_SHIFT 0
+#define DSI_VID_MODE_STS_VSG_RUNNING_MASK 0x00000001
+#define DSI_VID_MODE_STS_VSG_RUNNING(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, VSG_RUNNING, __x)
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA_SHIFT 1
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA_MASK 0x00000002
+#define DSI_VID_MODE_STS_ERR_MISSING_DATA(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_DATA, __x)
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC_SHIFT 2
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC_MASK 0x00000004
+#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_HSYNC, __x)
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC_SHIFT 3
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC_MASK 0x00000008
+#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_VSYNC, __x)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH_SHIFT 4
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH_MASK 0x00000010
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, REG_ERR_SMALL_LENGTH, __x)
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT_SHIFT 5
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT_MASK 0x00000020
+#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, REG_ERR_SMALL_HEIGHT, __x)
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE_SHIFT 6
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE_MASK 0x00000040
+#define DSI_VID_MODE_STS_ERR_BURSTWRITE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_BURSTWRITE, __x)
+#define DSI_VID_MODE_STS_ERR_LONGWRITE_SHIFT 7
+#define DSI_VID_MODE_STS_ERR_LONGWRITE_MASK 0x00000080
+#define DSI_VID_MODE_STS_ERR_LONGWRITE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_LONGWRITE, __x)
+#define DSI_VID_MODE_STS_ERR_LONGREAD_SHIFT 8
+#define DSI_VID_MODE_STS_ERR_LONGREAD_MASK 0x00000100
+#define DSI_VID_MODE_STS_ERR_LONGREAD(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_LONGREAD, __x)
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH_SHIFT 9
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH_MASK 0x00000200
+#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, ERR_VRS_WRONG_LENGTH, __x)
+#define DSI_VID_MODE_STS_VSG_RECOVERY_SHIFT 10
+#define DSI_VID_MODE_STS_VSG_RECOVERY_MASK 0x00000400
+#define DSI_VID_MODE_STS_VSG_RECOVERY(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS, VSG_RECOVERY, __x)
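+/*
+ * Reading a field back needs only the _MASK/_SHIFT pair; a sketch for
+ * testing the VSG_RUNNING status bit, where 'sts' is a hypothetical value
+ * already read from the DSI_VID_MODE_STS register:
+ *
+ *   bool running = (sts & DSI_VID_MODE_STS_VSG_RUNNING_MASK) >>
+ *                  DSI_VID_MODE_STS_VSG_RUNNING_SHIFT;
+ */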
+#define DSI_VID_VCA_SETTING1 0x000000C0
+#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT 0
+#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_MASK 0x0000FFFF
+#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING1, MAX_BURST_LIMIT, __x)
+#define DSI_VID_VCA_SETTING1_BURST_LP_SHIFT 16
+#define DSI_VID_VCA_SETTING1_BURST_LP_MASK 0x00010000
+#define DSI_VID_VCA_SETTING1_BURST_LP(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING1, BURST_LP, __x)
+#define DSI_VID_VCA_SETTING2 0x000000C4
+#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT 0
+#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_MASK 0x0000FFFF
+#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING2, EXACT_BURST_LIMIT, __x)
+#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_SHIFT 16
+#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_MASK 0xFFFF0000
+#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT(__x) \
+ DSI_VAL2REG(DSI_VID_VCA_SETTING2, MAX_LINE_LIMIT, __x)
+#define DSI_TVG_CTL 0x000000C8
+#define DSI_TVG_CTL_TVG_RUN_SHIFT 0
+#define DSI_TVG_CTL_TVG_RUN_MASK 0x00000001
+#define DSI_TVG_CTL_TVG_RUN(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_RUN, __x)
+#define DSI_TVG_CTL_TVG_STOPMODE_SHIFT 1
+#define DSI_TVG_CTL_TVG_STOPMODE_MASK 0x00000006
+#define DSI_TVG_CTL_TVG_STOPMODE(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_STOPMODE, __x)
+#define DSI_TVG_CTL_TVG_MODE_SHIFT 3
+#define DSI_TVG_CTL_TVG_MODE_MASK 0x00000018
+#define DSI_TVG_CTL_TVG_MODE(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_MODE, __x)
+#define DSI_TVG_CTL_TVG_STRIPE_SIZE_SHIFT 5
+#define DSI_TVG_CTL_TVG_STRIPE_SIZE_MASK 0x000000E0
+#define DSI_TVG_CTL_TVG_STRIPE_SIZE(__x) \
+ DSI_VAL2REG(DSI_TVG_CTL, TVG_STRIPE_SIZE, __x)
+#define DSI_TVG_IMG_SIZE 0x000000CC
+#define DSI_TVG_IMG_SIZE_TVG_LINE_SIZE_SHIFT 0
+#define DSI_TVG_IMG_SIZE_TVG_LINE_SIZE_MASK 0x00001FFF
+#define DSI_TVG_IMG_SIZE_TVG_LINE_SIZE(__x) \
+ DSI_VAL2REG(DSI_TVG_IMG_SIZE, TVG_LINE_SIZE, __x)
+#define DSI_TVG_IMG_SIZE_TVG_NBLINE_SHIFT 16
+#define DSI_TVG_IMG_SIZE_TVG_NBLINE_MASK 0x07FF0000
+#define DSI_TVG_IMG_SIZE_TVG_NBLINE(__x) \
+ DSI_VAL2REG(DSI_TVG_IMG_SIZE, TVG_NBLINE, __x)
+#define DSI_TVG_COLOR1 0x000000D0
+#define DSI_TVG_COLOR1_COL1_RED_SHIFT 0
+#define DSI_TVG_COLOR1_COL1_RED_MASK 0x000000FF
+#define DSI_TVG_COLOR1_COL1_RED(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR1, COL1_RED, __x)
+#define DSI_TVG_COLOR1_COL1_GREEN_SHIFT 8
+#define DSI_TVG_COLOR1_COL1_GREEN_MASK 0x0000FF00
+#define DSI_TVG_COLOR1_COL1_GREEN(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR1, COL1_GREEN, __x)
+#define DSI_TVG_COLOR1_COL1_BLUE_SHIFT 16
+#define DSI_TVG_COLOR1_COL1_BLUE_MASK 0x00FF0000
+#define DSI_TVG_COLOR1_COL1_BLUE(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR1, COL1_BLUE, __x)
+#define DSI_TVG_COLOR2 0x000000D4
+#define DSI_TVG_COLOR2_COL2_RED_SHIFT 0
+#define DSI_TVG_COLOR2_COL2_RED_MASK 0x000000FF
+#define DSI_TVG_COLOR2_COL2_RED(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR2, COL2_RED, __x)
+#define DSI_TVG_COLOR2_COL2_GREEN_SHIFT 8
+#define DSI_TVG_COLOR2_COL2_GREEN_MASK 0x0000FF00
+#define DSI_TVG_COLOR2_COL2_GREEN(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR2, COL2_GREEN, __x)
+#define DSI_TVG_COLOR2_COL2_BLUE_SHIFT 16
+#define DSI_TVG_COLOR2_COL2_BLUE_MASK 0x00FF0000
+#define DSI_TVG_COLOR2_COL2_BLUE(__x) \
+ DSI_VAL2REG(DSI_TVG_COLOR2, COL2_BLUE, __x)
+#define DSI_TVG_STS 0x000000D8
+#define DSI_TVG_STS_TVG_RUNNING_SHIFT 0
+#define DSI_TVG_STS_TVG_RUNNING_MASK 0x00000001
+#define DSI_TVG_STS_TVG_RUNNING(__x) \
+ DSI_VAL2REG(DSI_TVG_STS, TVG_RUNNING, __x)
+#define DSI_TBG_CTL 0x000000E0
+#define DSI_TBG_CTL_TBG_START_SHIFT 0
+#define DSI_TBG_CTL_TBG_START_MASK 0x00000001
+#define DSI_TBG_CTL_TBG_START(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_START, __x)
+#define DSI_TBG_CTL_TBG_HS_REQ_SHIFT 1
+#define DSI_TBG_CTL_TBG_HS_REQ_MASK 0x00000002
+#define DSI_TBG_CTL_TBG_HS_REQ(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_HS_REQ, __x)
+#define DSI_TBG_CTL_TBG_DATA_SEL_SHIFT 2
+#define DSI_TBG_CTL_TBG_DATA_SEL_MASK 0x00000004
+#define DSI_TBG_CTL_TBG_DATA_SEL(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_DATA_SEL, __x)
+#define DSI_TBG_CTL_TBG_MODE_SHIFT 3
+#define DSI_TBG_CTL_TBG_MODE_MASK 0x00000018
+#define DSI_TBG_CTL_TBG_MODE_1BYTE 0
+#define DSI_TBG_CTL_TBG_MODE_2BYTE 1
+#define DSI_TBG_CTL_TBG_MODE_BURST_COUNTER 2
+#define DSI_TBG_CTL_TBG_MODE_BURST 3
+#define DSI_TBG_CTL_TBG_MODE_ENUM(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_MODE, DSI_TBG_CTL_TBG_MODE_##__x)
+#define DSI_TBG_CTL_TBG_MODE(__x) \
+ DSI_VAL2REG(DSI_TBG_CTL, TBG_MODE, __x)
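+/*
+ * Both forms coexist for enumerated fields: the _ENUM macro takes the
+ * value name, the plain macro a raw number; under the DSI_VAL2REG
+ * assumption noted above, these two are equivalent:
+ *
+ *   DSI_TBG_CTL_TBG_MODE_ENUM(BURST)  ==  DSI_TBG_CTL_TBG_MODE(3)
+ */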
+#define DSI_TBG_SETTING 0x000000E4
+#define DSI_TBG_SETTING_TBG_DATA_SHIFT 0
+#define DSI_TBG_SETTING_TBG_DATA_MASK 0x0000FFFF
+#define DSI_TBG_SETTING_TBG_DATA(__x) \
+ DSI_VAL2REG(DSI_TBG_SETTING, TBG_DATA, __x)
+#define DSI_TBG_SETTING_TBG_CPT_SHIFT 16
+#define DSI_TBG_SETTING_TBG_CPT_MASK 0x0FFF0000
+#define DSI_TBG_SETTING_TBG_CPT(__x) \
+ DSI_VAL2REG(DSI_TBG_SETTING, TBG_CPT, __x)
+#define DSI_TBG_STS 0x000000E8
+#define DSI_TBG_STS_TBG_STATUS_SHIFT 0
+#define DSI_TBG_STS_TBG_STATUS_MASK 0x00000001
+#define DSI_TBG_STS_TBG_STATUS(__x) \
+ DSI_VAL2REG(DSI_TBG_STS, TBG_STATUS, __x)
+#define DSI_MCTL_MAIN_STS_CTL 0x000000F0
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN_SHIFT 0
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, PLL_LOCK_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN_SHIFT 1
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CLKLANE_READY_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN_SHIFT 2
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT1_READY_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN_SHIFT 3
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT2_READY_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN_SHIFT 4
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, HSTX_TO_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN_SHIFT 5
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, LPRX_TO_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN_SHIFT 6
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CRS_UNTERM_PCK_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN_SHIFT 7
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, VRS_UNTERM_PCK_ERR_EN, __x)
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE_SHIFT 16
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE_MASK 0x00010000
+#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, PLL_LOCK_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE_SHIFT 17
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE_MASK 0x00020000
+#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CLKLANE_READY_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE_SHIFT 18
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE_MASK 0x00040000
+#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT1_READY_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE_SHIFT 19
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE_MASK 0x00080000
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT2_READY_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE_SHIFT 20
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE_MASK 0x00100000
+#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, HSTX_TO_ERR_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE_SHIFT 21
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE_MASK 0x00200000
+#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, LPRX_TO_ERR_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE_SHIFT 22
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE_MASK 0x00400000
+#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CRS_UNTERM_PCK_ERR_EDGE, __x)
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE_SHIFT 23
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE_MASK 0x00800000
+#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, VRS_UNTERM_PCK_ERR_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL 0x000000F4
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN_SHIFT 0
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN_MASK 0x00000001
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN_SHIFT 1
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN_MASK 0x00000002
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_TE_MISS_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN_SHIFT 2
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN_MASK 0x00000004
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI1_UNDERRUN_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN_SHIFT 3
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN_MASK 0x00000008
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI2_UNDERRUN_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN_SHIFT 4
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN_MASK 0x00000010
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_UNWANTED_RD_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN_SHIFT 5
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN_MASK 0x00000020
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, CSM_RUNNING_EN, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE_SHIFT 16
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE_MASK 0x00010000
+#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE_SHIFT 17
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE_MASK 0x00020000
+#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_TE_MISS_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE_SHIFT 18
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE_MASK 0x00040000
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI1_UNDERRUN_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE_SHIFT 19
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE_MASK 0x00080000
+#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI2_UNDERRUN_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE_SHIFT 20
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE_MASK 0x00100000
+#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_UNWANTED_RD_EDGE, __x)
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE_SHIFT 21
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE_MASK 0x00200000
+#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, CSM_RUNNING_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL 0x000000F8
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN_SHIFT 0
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, CMD_TRANSMISSION_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN_SHIFT 1
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, WRITE_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN_SHIFT 2
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN_SHIFT 3
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN_SHIFT 4
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_RECEIVED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN_SHIFT 5
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_WITH_ERR_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN_SHIFT 6
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_RECEIVED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN_SHIFT 7
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN_SHIFT 8
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_COMPLETED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN_SHIFT 9
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_FINISHED_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN_SHIFT 10
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_WITH_ERR_EN, __x)
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE_SHIFT 16
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE_MASK 0x00010000
+#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, CMD_TRANSMISSION_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE_SHIFT 17
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE_MASK 0x00020000
+#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, WRITE_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE_SHIFT 18
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE_MASK 0x00040000
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE_SHIFT 19
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE_MASK 0x00080000
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE_SHIFT 20
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE_MASK 0x00100000
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_RECEIVED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE_SHIFT 21
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE_MASK 0x00200000
+#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_WITH_ERR_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE_SHIFT 22
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE_MASK 0x00400000
+#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_RECEIVED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE_SHIFT 23
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE_MASK 0x00800000
+#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE_SHIFT 24
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE_MASK 0x01000000
+#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_COMPLETED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE_SHIFT 25
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE_MASK 0x02000000
+#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_FINISHED_EDGE, __x)
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE_SHIFT 26
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE_MASK 0x04000000
+#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_WITH_ERR_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL 0x000000FC
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_FIXED_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNCORRECTABLE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_CHECKSUM_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNDECODABLE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_RECEIVE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_OVERSIZE_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_WRONG_LENGTH_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_MISSING_EOT_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_EOT_WITH_ERR_EN, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE_SHIFT 16
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE_MASK 0x00010000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_FIXED_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE_SHIFT 17
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE_MASK 0x00020000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNCORRECTABLE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE_SHIFT 18
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE_MASK 0x00040000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_CHECKSUM_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE_SHIFT 19
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE_MASK 0x00080000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNDECODABLE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE_SHIFT 20
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE_MASK 0x00100000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_RECEIVE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE_SHIFT 21
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE_MASK 0x00200000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_OVERSIZE_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE_SHIFT 22
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE_MASK 0x00400000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_WRONG_LENGTH_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE_SHIFT 23
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE_MASK 0x00800000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_MISSING_EOT_EDGE, __x)
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE_SHIFT 24
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE_MASK 0x01000000
+#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_EOT_WITH_ERR_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL 0x00000100
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN_SHIFT 0
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN_MASK 0x00000001
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RUNNING_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN_SHIFT 1
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN_MASK 0x00000002
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_DATA_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN_SHIFT 2
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN_MASK 0x00000004
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_HSYNC_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN_SHIFT 3
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN_MASK 0x00000008
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_VSYNC_EN, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN_SHIFT 4
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN_MASK 0x00000010
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_LENGTH_EN, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN_SHIFT 5
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN_MASK 0x00000020
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_HEIGHT_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN_SHIFT 6
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN_MASK 0x00000040
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_BURSTWRITE_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN_SHIFT 7
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN_MASK 0x00000080
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGWRITE_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN_SHIFT 8
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN_MASK 0x00000100
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGREAD_EN, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN_SHIFT 9
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN_MASK 0x00000200
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_VRS_WRONG_LENGTH_EN, __x)
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE_SHIFT 16
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE_MASK 0x00010000
+#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RUNNING_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE_SHIFT 17
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE_MASK 0x00020000
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_DATA_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE_SHIFT 18
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE_MASK 0x00040000
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_HSYNC_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE_SHIFT 19
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE_MASK 0x00080000
+#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_VSYNC_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE_SHIFT 20
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE_MASK 0x00100000
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_LENGTH_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE_SHIFT 21
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE_MASK 0x00200000
+#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_HEIGHT_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE_SHIFT 22
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE_MASK 0x00400000
+#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_BURSTWRITE_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE_SHIFT 23
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE_MASK 0x00800000
+#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGWRITE_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE_SHIFT 24
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE_MASK 0x01000000
+#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGREAD_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE_SHIFT 25
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE_MASK 0x02000000
+#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_VRS_WRONG_LENGTH_EDGE, __x)
+#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE_SHIFT 26
+#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE_MASK 0x04000000
+#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RECOVERY_EDGE, __x)
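+/*
+ * Layout note, recoverable from the masks above: the _STS_CTL registers
+ * pair each enable bit in the low half with a matching _EDGE bit 16
+ * positions higher (e.g. VSG_RUNNING_EN at bit 0, VSG_RUNNING_EDGE at
+ * bit 16), presumably selecting the triggering edge for that event.
+ */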
+#define DSI_TG_STS_CTL 0x00000104
+#define DSI_TG_STS_CTL_TVG_STS_EN_SHIFT 0
+#define DSI_TG_STS_CTL_TVG_STS_EN_MASK 0x00000001
+#define DSI_TG_STS_CTL_TVG_STS_EN(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TVG_STS_EN, __x)
+#define DSI_TG_STS_CTL_TBG_STS_EN_SHIFT 1
+#define DSI_TG_STS_CTL_TBG_STS_EN_MASK 0x00000002
+#define DSI_TG_STS_CTL_TBG_STS_EN(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TBG_STS_EN, __x)
+#define DSI_TG_STS_CTL_TVG_STS_EDGE_SHIFT 16
+#define DSI_TG_STS_CTL_TVG_STS_EDGE_MASK 0x00010000
+#define DSI_TG_STS_CTL_TVG_STS_EDGE(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TVG_STS_EDGE, __x)
+#define DSI_TG_STS_CTL_TBG_STS_EDGE_SHIFT 17
+#define DSI_TG_STS_CTL_TBG_STS_EDGE_MASK 0x00020000
+#define DSI_TG_STS_CTL_TBG_STS_EDGE(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CTL, TBG_STS_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL 0x00000108
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN_SHIFT 6
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN_MASK 0x00000040
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN_SHIFT 7
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN_MASK 0x00000080
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN_SHIFT 8
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN_MASK 0x00000100
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN_SHIFT 9
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN_MASK 0x00000200
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN_SHIFT 10
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN_MASK 0x00000400
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN_SHIFT 11
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN_MASK 0x00000800
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN_SHIFT 12
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN_MASK 0x00001000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN_SHIFT 13
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN_MASK 0x00002000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN_SHIFT 14
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN_MASK 0x00004000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_1_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN_SHIFT 15
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN_MASK 0x00008000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_2_EN, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE_SHIFT 22
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE_MASK 0x00400000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE_SHIFT 23
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE_MASK 0x00800000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE_SHIFT 24
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE_MASK 0x01000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE_SHIFT 25
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE_MASK 0x02000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE_SHIFT 26
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE_MASK 0x04000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE_SHIFT 27
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE_MASK 0x08000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE_SHIFT 28
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE_MASK 0x10000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE_SHIFT 29
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE_MASK 0x20000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_2_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE_SHIFT 30
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE_MASK 0x40000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_1_EDGE, __x)
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE_SHIFT 31
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE_MASK 0x80000000
+#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE(__x) \
+ DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_2_EDGE, __x)
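+/*
+ * Naming caveat: this control register is spelled DSI_MCTL_DHPY_ERR_CTL
+ * ("DHPY"), while the matching clear register at offset 0x128 below is
+ * DSI_MCTL_DPHY_ERR_CLR ("DPHY"); callers must match each spelling
+ * exactly as defined.
+ */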
+#define DSI_MCTL_MAIN_STS_CLR 0x00000110
+#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR_SHIFT 0
+#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, PLL_LOCK_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR_SHIFT 1
+#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, CLKLANE_READY_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR_SHIFT 2
+#define DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, DAT1_READY_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR_SHIFT 3
+#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, DAT2_READY_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR_SHIFT 4
+#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, HSTX_TO_ERR_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR_SHIFT 5
+#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, LPRX_TO_ERR_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR_SHIFT 6
+#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, CRS_UNTERM_PCK_CLR, __x)
+#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR_SHIFT 7
+#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, VRS_UNTERM_PCK_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR 0x00000114
+#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR_SHIFT 0
+#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR_MASK 0x00000001
+#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_NO_TE_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR_SHIFT 1
+#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR_MASK 0x00000002
+#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_TE_MISS_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR_SHIFT 2
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR_MASK 0x00000004
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_SDI1_UNDERRUN_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR_SHIFT 3
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR_MASK 0x00000008
+#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_SDI2_UNDERRUN_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR_SHIFT 4
+#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR_MASK 0x00000010
+#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_UNWANTED_RD_CLR, __x)
+#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR_SHIFT 5
+#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR_MASK 0x00000020
+#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, CSM_RUNNING_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR 0x00000118
+#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR_SHIFT 0
+#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, CMD_TRANSMISSION_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR_SHIFT 1
+#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, WRITE_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR_SHIFT 2
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TRIGGER_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR_SHIFT 3
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, READ_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR_SHIFT 4
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, ACKNOWLEDGE_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR_SHIFT 5
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR_SHIFT 6
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TRIGGER_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR_SHIFT 7
+#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TE_RECEIVED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR_SHIFT 8
+#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, BTA_COMPLETED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR_SHIFT 9
+#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, BTA_FINISHED_CLR, __x)
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR_SHIFT 10
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, READ_COMPLETED_WITH_ERR_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR 0x0000011C
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_FIXED_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_UNCORRECTABLE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_CHECKSUM_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_UNDECODABLE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_RECEIVE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_OVERSIZE_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_WRONG_LENGTH_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_MISSING_EOT_CLR, __x)
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_EOT_WITH_ERR_CLR, __x)
+#define DSI_VID_MODE_STS_CLR 0x00000120
+#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR_SHIFT 0
+#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR_MASK 0x00000001
+#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, VSG_STS_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR_SHIFT 1
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR_MASK 0x00000002
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_DATA_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR_SHIFT 2
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR_MASK 0x00000004
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_HSYNC_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR_SHIFT 3
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR_MASK 0x00000008
+#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_VSYNC_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR_SHIFT 4
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR_MASK 0x00000010
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, REG_ERR_SMALL_LENGTH_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR_SHIFT 5
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR_MASK 0x00000020
+#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, REG_ERR_SMALL_HEIGHT_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR_SHIFT 6
+#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR_MASK 0x00000040
+#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_BURSTWRITE_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR_SHIFT 7
+#define DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR_MASK 0x00000080
+#define DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_LONGWRITE_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR_SHIFT 8
+#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR_MASK 0x00000100
+#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_LONGREAD_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR_SHIFT 9
+#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR_MASK 0x00000200
+#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_VRS_WRONG_LENGTH_CLR, __x)
+#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR_SHIFT 10
+#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR_MASK 0x00000400
+#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_CLR, VSG_RECOVERY_CLR, __x)
+#define DSI_TG_STS_CLR 0x00000124
+#define DSI_TG_STS_CLR_TVG_STS_CLR_SHIFT 0
+#define DSI_TG_STS_CLR_TVG_STS_CLR_MASK 0x00000001
+#define DSI_TG_STS_CLR_TVG_STS_CLR(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CLR, TVG_STS_CLR, __x)
+#define DSI_TG_STS_CLR_TBG_STS_CLR_SHIFT 1
+#define DSI_TG_STS_CLR_TBG_STS_CLR_MASK 0x00000002
+#define DSI_TG_STS_CLR_TBG_STS_CLR(__x) \
+ DSI_VAL2REG(DSI_TG_STS_CLR, TBG_STS_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR 0x00000128
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR_SHIFT 6
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR_MASK 0x00000040
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_ESC_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR_SHIFT 7
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR_MASK 0x00000080
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_ESC_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR_SHIFT 8
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR_MASK 0x00000100
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_SYNCESC_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR_SHIFT 9
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR_MASK 0x00000200
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_SYNCESC_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR_SHIFT 10
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR_MASK 0x00000400
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONTROL_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR_SHIFT 11
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR_MASK 0x00000800
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONTROL_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR_SHIFT 12
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR_MASK 0x00001000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP0_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR_SHIFT 13
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR_MASK 0x00002000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP0_2_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR_SHIFT 14
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR_MASK 0x00004000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP1_1_CLR, __x)
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR_SHIFT 15
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR_MASK 0x00008000
+#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP1_2_CLR, __x)
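+/*
+ * The register families repeat per event group: a live status register
+ * (e.g. DSI_VID_MODE_STS), an interrupt control register (_STS_CTL), a
+ * clear register (_STS_CLR, presumably write-1-to-clear given its
+ * one-bit-per-event layout), and the _STS_FLAG registers that follow,
+ * which mirror the same bit positions for latched events.
+ */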
+#define DSI_MCTL_MAIN_STS_FLAG 0x00000130
+#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG_SHIFT 0
+#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG_MASK 0x00000001
+#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, PLL_LOCK_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG_SHIFT 1
+#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG_MASK 0x00000002
+#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, CLKLANE_READY_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG_SHIFT 2
+#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG_MASK 0x00000004
+#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, DAT1_READY_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG_SHIFT 3
+#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG_MASK 0x00000008
+#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, DAT2_READY_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG_SHIFT 4
+#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG_MASK 0x00000010
+#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, HSTX_TO_ERR_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG_SHIFT 5
+#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG_MASK 0x00000020
+#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, LPRX_TO_ERR_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG_SHIFT 6
+#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG_MASK 0x00000040
+#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, CRS_UNTERM_PCK_FLAG, __x)
+#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG_SHIFT 7
+#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG_MASK 0x00000080
+#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, VRS_UNTERM_PCK_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG 0x00000134
+#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG_SHIFT 0
+#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG_MASK 0x00000001
+#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_NO_TE_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG_SHIFT 1
+#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG_MASK 0x00000002
+#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_TE_MISS_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG_SHIFT 2
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG_MASK 0x00000004
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_SDI1_UNDERRUN_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG_SHIFT 3
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG_MASK 0x00000008
+#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_SDI2_UNDERRUN_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG_SHIFT 4
+#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG_MASK 0x00000010
+#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_UNWANTED_RD_FLAG, __x)
+#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG_SHIFT 5
+#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG_MASK 0x00000020
+#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG(__x) \
+ DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, CSM_RUNNING_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG 0x00000138
+#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG_SHIFT 0
+#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG_MASK 0x00000001
+#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, CMD_TRANSMISSION_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG_SHIFT 1
+#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG_MASK 0x00000002
+#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, WRITE_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG_SHIFT 2
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG_MASK 0x00000004
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TRIGGER_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG_SHIFT 3
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG_MASK 0x00000008
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, READ_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG_SHIFT 4
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG_MASK 0x00000010
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, ACKNOWLEDGE_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG_SHIFT 5
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG_MASK 0x00000020
+#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG_SHIFT 6
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG_MASK 0x00000040
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TRIGGER_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG_SHIFT 7
+#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG_MASK 0x00000080
+#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TE_RECEIVED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG_SHIFT 8
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG_MASK 0x00000100
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, BTA_COMPLETED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG_SHIFT 9
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG_MASK 0x00000200
+#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, BTA_FINISHED_FLAG, __x)
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG_SHIFT 10
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG_MASK 0x00000400
+#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, READ_COMPLETED_WITH_ERR_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG 0x0000013C
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG_SHIFT 0
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG_MASK 0x00000001
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_FIXED_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG_SHIFT 1
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG_MASK 0x00000002
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_UNCORRECTABLE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG_SHIFT 2
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG_MASK 0x00000004
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_CHECKSUM_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG_SHIFT 3
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG_MASK 0x00000008
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_UNDECODABLE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG_SHIFT 4
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG_MASK 0x00000010
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_RECEIVE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG_SHIFT 5
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG_MASK 0x00000020
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_OVERSIZE_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG_SHIFT 6
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG_MASK 0x00000040
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_WRONG_LENGTH_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG_SHIFT 7
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG_MASK 0x00000080
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_MISSING_EOT_FLAG, __x)
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG_SHIFT 8
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG_MASK 0x00000100
+#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG(__x) \
+ DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_EOT_WITH_ERR_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG 0x00000140
+#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG_SHIFT 0
+#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG_MASK 0x00000001
+#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, VSG_STS_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG_SHIFT 1
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG_MASK 0x00000002
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_DATA_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG_SHIFT 2
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG_MASK 0x00000004
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_HSYNC_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG_SHIFT 3
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG_MASK 0x00000008
+#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_VSYNC_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG_SHIFT 4
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG_MASK 0x00000010
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, REG_ERR_SMALL_LENGTH_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG_SHIFT 5
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG_MASK 0x00000020
+#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, REG_ERR_SMALL_HEIGHT_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG_SHIFT 6
+#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG_MASK 0x00000040
+#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_BURSTWRITE_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG_SHIFT 7
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG_MASK 0x00000080
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_LONGWRITE_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG_SHIFT 8
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG_MASK 0x00000100
+#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_LONGREAD_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG_SHIFT 9
+#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG_MASK 0x00000200
+#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_VRS_WRONG_LENGTH_FLAG, __x)
+#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG_SHIFT 10
+#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG_MASK 0x00000400
+#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG(__x) \
+ DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, VSG_RECOVERY_FLAG, __x)
+#define DSI_TG_STS_FLAG 0x00000144
+#define DSI_TG_STS_FLAG_TVG_STS_FLAG_SHIFT 0
+#define DSI_TG_STS_FLAG_TVG_STS_FLAG_MASK 0x00000001
+#define DSI_TG_STS_FLAG_TVG_STS_FLAG(__x) \
+ DSI_VAL2REG(DSI_TG_STS_FLAG, TVG_STS_FLAG, __x)
+#define DSI_TG_STS_FLAG_TBG_STS_FLAG_SHIFT 1
+#define DSI_TG_STS_FLAG_TBG_STS_FLAG_MASK 0x00000002
+#define DSI_TG_STS_FLAG_TBG_STS_FLAG(__x) \
+ DSI_VAL2REG(DSI_TG_STS_FLAG, TBG_STS_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG 0x00000148
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG_SHIFT 6
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG_MASK 0x00000040
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_ESC_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG_SHIFT 7
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG_MASK 0x00000080
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_ESC_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG_SHIFT 8
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG_MASK 0x00000100
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_SYNCESC_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG_SHIFT 9
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG_MASK 0x00000200
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_SYNCESC_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG_SHIFT 10
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG_MASK 0x00000400
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONTROL_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG_SHIFT 11
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG_MASK 0x00000800
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONTROL_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG_SHIFT 12
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG_MASK 0x00001000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP0_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG_SHIFT 13
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG_MASK 0x00002000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP0_2_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG_SHIFT 14
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG_MASK 0x00004000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP1_1_FLAG, __x)
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG_SHIFT 15
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG_MASK 0x00008000
+#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG(__x) \
+ DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP1_2_FLAG, __x)
+#define DSI_DPHY_LANES_TRIM 0x00000150
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1_SHIFT 0
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1_MASK 0x00000003
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SKEW_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1_SHIFT 2
+#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1_MASK 0x00000004
+#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_CD_OFF_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1_SHIFT 3
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1_MASK 0x00000008
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1_SHIFT 4
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1_MASK 0x00000010
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1_SHIFT 5
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1_MASK 0x00000020
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_DAT1, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK_SHIFT 6
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK_MASK 0x000000C0
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SKEW_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK_SHIFT 8
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK_MASK 0x00000300
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_LP_RX_VIL_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK_SHIFT 10
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK_MASK 0x00000C00
+#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_LP_TX_SLEWRATE_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_SHIFT 12
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_MASK 0x00001000
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_81 0
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_90 1
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_ENUM(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SPECS_90_81B, \
+ DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_##__x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SPECS_90_81B, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK_SHIFT 13
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK_MASK 0x00002000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK_SHIFT 14
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK_MASK 0x00004000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK_SHIFT 15
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK_MASK 0x00008000
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_CLK, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2_SHIFT 16
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2_MASK 0x00030000
+#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SKEW_DAT2, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2_SHIFT 18
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2_MASK 0x00040000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_DAT2, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2_SHIFT 19
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2_MASK 0x00080000
+#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_DAT2, __x)
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2_SHIFT 20
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2_MASK 0x00100000
+#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2(__x) \
+ DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_DAT2, __x)
+#define DSI_ID_REG 0x00000FF0
+#define DSI_ID_REG_Y_SHIFT 0
+#define DSI_ID_REG_Y_MASK 0x0000000F
+#define DSI_ID_REG_Y(__x) \
+ DSI_VAL2REG(DSI_ID_REG, Y, __x)
+#define DSI_ID_REG_X_SHIFT 4
+#define DSI_ID_REG_X_MASK 0x000000F0
+#define DSI_ID_REG_X(__x) \
+ DSI_VAL2REG(DSI_ID_REG, X, __x)
+#define DSI_ID_REG_H_SHIFT 8
+#define DSI_ID_REG_H_MASK 0x00000300
+#define DSI_ID_REG_H(__x) \
+ DSI_VAL2REG(DSI_ID_REG, H, __x)
+#define DSI_ID_REG_PRODUCT_ID_SHIFT 10
+#define DSI_ID_REG_PRODUCT_ID_MASK 0x0003FC00
+#define DSI_ID_REG_PRODUCT_ID(__x) \
+ DSI_VAL2REG(DSI_ID_REG, PRODUCT_ID, __x)
+#define DSI_ID_REG_VENDOR_ID_SHIFT 18
+#define DSI_ID_REG_VENDOR_ID_MASK 0xFFFC0000
+#define DSI_ID_REG_VENDOR_ID(__x) \
+ DSI_VAL2REG(DSI_ID_REG, VENDOR_ID, __x)
+#define DSI_IP_CONF 0x00000FF4
+#define DSI_IP_CONF_FIFO_SIZE_SHIFT 0
+#define DSI_IP_CONF_FIFO_SIZE_MASK 0x0000003F
+#define DSI_IP_CONF_FIFO_SIZE(__x) \
+ DSI_VAL2REG(DSI_IP_CONF, FIFO_SIZE, __x)
diff --git a/drivers/video/mcde/mcde_bus.c b/drivers/video/mcde/mcde_bus.c
new file mode 100644
index 00000000000..bdcf65b0fb9
--- /dev/null
+++ b/drivers/video/mcde/mcde_bus.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE display bus driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/notifier.h>
+
+#include <video/mcde_display.h>
+#include <video/mcde_dss.h>
+
+#define to_mcde_display_driver(__drv) \
+ container_of((__drv), struct mcde_display_driver, driver)
+
+static BLOCKING_NOTIFIER_HEAD(bus_notifier_list);
+
+static int mcde_drv_suspend(struct device *_dev, pm_message_t state);
+static int mcde_drv_resume(struct device *_dev);
+struct bus_type mcde_bus_type;
+
+static int mcde_suspend_device(struct device *dev, void *data)
+{
+	pm_message_t *state = data;
+
+ if (dev->driver && dev->driver->suspend)
+ return dev->driver->suspend(dev, *state);
+ return 0;
+}
+
+static int mcde_resume_device(struct device *dev, void *data)
+{
+ if (dev->driver && dev->driver->resume)
+ return dev->driver->resume(dev);
+ return 0;
+}
+
+/* Bus driver */
+
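+/*
+ * Devices are matched to drivers by name prefix, so an instance
+ * suffix added by mcde_display_device_register() still binds: e.g.
+ * a device named "my_panel.0" (hypothetical name) matches a driver
+ * named "my_panel".
+ */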
+static int mcde_bus_match(struct device *_dev, struct device_driver *driver)
+{
+ pr_debug("Matching device %s with driver %s\n",
+ dev_name(_dev), driver->name);
+
+ return strncmp(dev_name(_dev), driver->name, strlen(driver->name)) == 0;
+}
+
+static int mcde_bus_suspend(struct device *_dev, pm_message_t state)
+{
+ int ret;
+ ret = bus_for_each_dev(&mcde_bus_type, NULL, &state,
+ mcde_suspend_device);
+ if (ret) {
+ /* TODO Resume all suspended devices */
+ /* mcde_bus_resume(dev); */
+ return ret;
+ }
+ return 0;
+}
+
+static int mcde_bus_resume(struct device *_dev)
+{
+ return bus_for_each_dev(&mcde_bus_type, NULL, NULL, mcde_resume_device);
+}
+
+struct bus_type mcde_bus_type = {
+ .name = "mcde_bus",
+ .match = mcde_bus_match,
+ .suspend = mcde_bus_suspend,
+ .resume = mcde_bus_resume,
+};
+
+static int mcde_drv_probe(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ return drv->probe(dev);
+}
+
+static int mcde_drv_remove(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ return drv->remove(dev);
+}
+
+static void mcde_drv_shutdown(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ drv->shutdown(dev);
+}
+
+static int mcde_drv_suspend(struct device *_dev, pm_message_t state)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ if (drv->suspend)
+ return drv->suspend(dev, state);
+ else
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ return dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF);
+#else
+ return 0;
+#endif
+}
+
+static int mcde_drv_resume(struct device *_dev)
+{
+ struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver);
+ struct mcde_display_device *dev = to_mcde_display_device(_dev);
+
+ if (drv->resume)
+ return drv->resume(dev);
+ else
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ return dev->set_power_mode(dev, MCDE_DISPLAY_PM_STANDBY);
+#else
+ return 0;
+#endif
+}
+
+/* Bus device */
+
+static void mcde_bus_release(struct device *dev)
+{
+}
+
+struct device mcde_bus = {
+ .init_name = "mcde_bus",
+ .release = mcde_bus_release
+};
+
+/* Public bus API */
+
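+/*
+ * Usage sketch for a panel driver (hypothetical names, error
+ * handling omitted):
+ *
+ *	static struct mcde_display_driver my_panel_driver = {
+ *		.probe = my_panel_probe,
+ *		.remove = my_panel_remove,
+ *		.driver = { .name = "my_panel" },
+ *	};
+ *
+ *	mcde_display_driver_register(&my_panel_driver);
+ *
+ * The driver name must match the name of the corresponding
+ * mcde_display_device for mcde_bus_match() to bind them.
+ */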
+int mcde_display_driver_register(struct mcde_display_driver *drv)
+{
+ drv->driver.bus = &mcde_bus_type;
+ if (drv->probe)
+ drv->driver.probe = mcde_drv_probe;
+ if (drv->remove)
+ drv->driver.remove = mcde_drv_remove;
+ if (drv->shutdown)
+ drv->driver.shutdown = mcde_drv_shutdown;
+ drv->driver.suspend = mcde_drv_suspend;
+ drv->driver.resume = mcde_drv_resume;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(mcde_display_driver_register);
+
+void mcde_display_driver_unregister(struct mcde_display_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(mcde_display_driver_unregister);
+
+static void mcde_display_dev_release(struct device *dev)
+{
+ /* Do nothing */
+}
+
+int mcde_display_device_register(struct mcde_display_device *dev)
+{
+ /* Setup device */
+ if (!dev)
+ return -EINVAL;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ dev->dev.bus = &mcde_bus_type;
+	if (dev->dev.parent == NULL)
+		dev->dev.parent = &mcde_bus;
+	dev->dev.release = mcde_display_dev_release;
+	if (dev->id != -1)
+		dev_set_name(&dev->dev, "%s.%d", dev->name, dev->id);
+	else
+		dev_set_name(&dev->dev, "%s", dev->name);
+
+ mcde_display_init_device(dev);
+
+ return device_register(&dev->dev);
+}
+EXPORT_SYMBOL(mcde_display_device_register);
+
+void mcde_display_device_unregister(struct mcde_display_device *dev)
+{
+ device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL(mcde_display_device_unregister);
+
+/* Notifications */
+int mcde_dss_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&bus_notifier_list, nb);
+}
+EXPORT_SYMBOL(mcde_dss_register_notifier);
+
+int mcde_dss_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&bus_notifier_list, nb);
+}
+EXPORT_SYMBOL(mcde_dss_unregister_notifier);
+
+static int bus_notify_callback(struct notifier_block *nb,
+ unsigned long event, void *dev)
+{
+ struct mcde_display_device *ddev = to_mcde_display_device(dev);
+
+ if (event == BUS_NOTIFY_BOUND_DRIVER) {
+ ddev->initialized = true;
+ blocking_notifier_call_chain(&bus_notifier_list,
+ MCDE_DSS_EVENT_DISPLAY_REGISTERED, ddev);
+ } else if (event == BUS_NOTIFY_UNBIND_DRIVER) {
+ ddev->initialized = false;
+ blocking_notifier_call_chain(&bus_notifier_list,
+ MCDE_DSS_EVENT_DISPLAY_UNREGISTERED, ddev);
+ }
+ return 0;
+}
+
+static struct notifier_block bus_nb = {
+ .notifier_call = bus_notify_callback,
+};
+
+/* Driver init/exit */
+
+int __init mcde_display_init(void)
+{
+ int ret;
+
+ ret = bus_register(&mcde_bus_type);
+ if (ret) {
+ pr_warning("Unable to register bus type\n");
+ goto no_bus_registration;
+ }
+ ret = device_register(&mcde_bus);
+ if (ret) {
+ pr_warning("Unable to register bus device\n");
+ goto no_device_registration;
+ }
+ ret = bus_register_notifier(&mcde_bus_type, &bus_nb);
+ if (ret) {
+ pr_warning("Unable to register bus notifier\n");
+ goto no_bus_notifier;
+ }
+
+ goto out;
+
+no_bus_notifier:
+ device_unregister(&mcde_bus);
+no_device_registration:
+ bus_unregister(&mcde_bus_type);
+no_bus_registration:
+out:
+ return ret;
+}
+
+void mcde_display_exit(void)
+{
+ bus_unregister_notifier(&mcde_bus_type, &bus_nb);
+ device_unregister(&mcde_bus);
+ bus_unregister(&mcde_bus_type);
+}
diff --git a/drivers/video/mcde/mcde_debugfs.c b/drivers/video/mcde/mcde_debugfs.c
new file mode 100644
index 00000000000..586b1787d00
--- /dev/null
+++ b/drivers/video/mcde/mcde_debugfs.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE base driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+
+#include "mcde_debugfs.h"
+
+#define MAX_NUM_OVERLAYS 2
+#define MAX_NUM_CHANNELS 4
+#define DEFAULT_DMESG_FPS_LOG_INTERVAL 100
+
+struct fps_info {
+ u32 enable_dmesg;
+ u32 interval_ms;
+ struct timespec timestamp_last;
+ u32 frame_counter_last;
+ u32 frame_counter;
+ u32 fpks;
+};
+
+struct overlay_info {
+ u8 id;
+ struct dentry *dentry;
+ struct fps_info fps;
+};
+
+struct channel_info {
+ u8 id;
+ struct dentry *dentry;
+ struct mcde_chnl_state *chnl;
+ struct fps_info fps;
+ struct overlay_info overlays[MAX_NUM_OVERLAYS];
+};
+
+static struct mcde_info {
+ struct device *dev;
+ struct dentry *dentry;
+ struct channel_info channels[MAX_NUM_CHANNELS];
+} mcde;
+
+/* Requires: lhs > rhs */
+static inline u32 timespec_ms_diff(struct timespec lhs, struct timespec rhs)
+{
+ struct timespec tmp_ts = timespec_sub(lhs, rhs);
+ u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
+ do_div(tmp_ns, NSEC_PER_MSEC);
+ return (u32)tmp_ns;
+}
+
+/* Returns "frames per 1000 secs", divide by 1000 to get fps with 3 decimals */
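+/*
+ * Example with assumed numbers: 30 frames counted over a 500 ms
+ * interval give fpks = 30 * 1000000 / 500 = 60000, i.e. 60.000 fps.
+ */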
+static u32 update_fps(struct fps_info *fps)
+{
+ struct timespec now;
+ u32 fpks = 0, ms_since_last, num_frames;
+
+ getrawmonotonic(&now);
+ fps->frame_counter++;
+
+ ms_since_last = timespec_ms_diff(now, fps->timestamp_last);
+ num_frames = fps->frame_counter - fps->frame_counter_last;
+ if (num_frames > 1 && ms_since_last >= fps->interval_ms) {
+ fpks = (num_frames * 1000000) / ms_since_last;
+ fps->timestamp_last = now;
+ fps->frame_counter_last = fps->frame_counter;
+ fps->fpks = fpks;
+ }
+
+ return fpks;
+}
+
+static void update_chnl_fps(struct channel_info *ci)
+{
+ u32 fpks = update_fps(&ci->fps);
+ if (fpks && ci->fps.enable_dmesg)
+ dev_info(mcde.dev, "FPS: chnl=%d fps=%d.%.3d\n", ci->id,
+ fpks / 1000, fpks % 1000);
+}
+
+static void update_ovly_fps(struct channel_info *ci, struct overlay_info *oi)
+{
+ u32 fpks = update_fps(&oi->fps);
+ if (fpks && oi->fps.enable_dmesg)
+ dev_info(mcde.dev, "FPS: ovly=%d.%d fps=%d.%.3d\n", ci->id,
+ oi->id, fpks / 1000, fpks % 1000);
+}
+
+int mcde_debugfs_create(struct device *dev)
+{
+ if (mcde.dev)
+ return -EBUSY;
+
+ mcde.dentry = debugfs_create_dir("mcde", NULL);
+ if (!mcde.dentry)
+ return -ENOMEM;
+ mcde.dev = dev;
+
+ return 0;
+}
+
+static struct channel_info *find_chnl(u8 chnl_id)
+{
+	if (chnl_id >= MAX_NUM_CHANNELS)
+ return NULL;
+ return &mcde.channels[chnl_id];
+}
+
+static struct overlay_info *find_ovly(struct channel_info *ci, u8 ovly_id)
+{
+ if (!ci || ovly_id >= MAX_NUM_OVERLAYS)
+ return NULL;
+ return &ci->overlays[ovly_id];
+}
+
+static void create_fps_files(struct dentry *dentry, struct fps_info *fps)
+{
+ debugfs_create_u32("frame_counter", S_IRUGO, dentry,
+ &fps->frame_counter);
+ debugfs_create_u32("frames_per_ksecs", S_IRUGO, dentry, &fps->fpks);
+ debugfs_create_u32("interval_ms", S_IRUGO|S_IWUGO, dentry,
+ &fps->interval_ms);
+ debugfs_create_u32("dmesg", S_IRUGO|S_IWUGO, dentry,
+ &fps->enable_dmesg);
+}
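+
+/*
+ * The creators below build a debugfs layout like this (example for
+ * channel 0, overlay 0):
+ *
+ *	mcde/chnl0/{frame_counter,frames_per_ksecs,interval_ms,dmesg}
+ *	mcde/chnl0/ovly0/{frame_counter,frames_per_ksecs,interval_ms,dmesg}
+ */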
+
+int mcde_debugfs_channel_create(u8 chnl_id, struct mcde_chnl_state *chnl)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+ char name[10];
+
+ if (!chnl || !ci)
+ return -EINVAL;
+ if (ci->chnl)
+ return -EBUSY;
+
+ snprintf(name, sizeof(name), "chnl%d", chnl_id);
+ ci->dentry = debugfs_create_dir(name, mcde.dentry);
+ if (!ci->dentry)
+ return -ENOMEM;
+
+ create_fps_files(ci->dentry, &ci->fps);
+
+ ci->fps.interval_ms = DEFAULT_DMESG_FPS_LOG_INTERVAL;
+ ci->id = chnl_id;
+ ci->chnl = chnl;
+
+ return 0;
+}
+
+int mcde_debugfs_overlay_create(u8 chnl_id, u8 ovly_id)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+ struct overlay_info *oi = find_ovly(ci, ovly_id);
+ char name[10];
+
+ if (!oi || !ci || ovly_id >= MAX_NUM_OVERLAYS)
+ return -EINVAL;
+ if (oi->dentry)
+ return -EBUSY;
+
+ snprintf(name, sizeof(name), "ovly%d", ovly_id);
+ oi->dentry = debugfs_create_dir(name, ci->dentry);
+ if (!oi->dentry)
+ return -ENOMEM;
+
+ create_fps_files(oi->dentry, &oi->fps);
+
+ oi->fps.interval_ms = DEFAULT_DMESG_FPS_LOG_INTERVAL;
+ oi->id = ovly_id;
+
+ return 0;
+}
+
+void mcde_debugfs_channel_update(u8 chnl_id)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+
+ if (!ci || !ci->chnl)
+ return;
+
+ update_chnl_fps(ci);
+}
+
+void mcde_debugfs_overlay_update(u8 chnl_id, u8 ovly_id)
+{
+ struct channel_info *ci = find_chnl(chnl_id);
+ struct overlay_info *oi = find_ovly(ci, ovly_id);
+
+ if (!oi || !oi->dentry)
+ return;
+
+ update_ovly_fps(ci, oi);
+}
+
diff --git a/drivers/video/mcde/mcde_debugfs.h b/drivers/video/mcde/mcde_debugfs.h
new file mode 100644
index 00000000000..9f1e7f18ea5
--- /dev/null
+++ b/drivers/video/mcde/mcde_debugfs.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE base driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __MCDE_DEBUGFS__H__
+#define __MCDE_DEBUGFS__H__
+
+#include <video/mcde.h>
+
+int mcde_debugfs_create(struct device *dev);
+int mcde_debugfs_channel_create(u8 chnl_id, struct mcde_chnl_state *chnl);
+int mcde_debugfs_overlay_create(u8 chnl_id, u8 ovly_id);
+
+void mcde_debugfs_channel_update(u8 chnl_id);
+void mcde_debugfs_overlay_update(u8 chnl_id, u8 ovly_id);
+
+#endif /* __MCDE_DEBUGFS__H__ */
+
diff --git a/drivers/video/mcde/mcde_display.c b/drivers/video/mcde/mcde_display.c
new file mode 100644
index 00000000000..9e9eb78516e
--- /dev/null
+++ b/drivers/video/mcde/mcde_display.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE display driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include <video/mcde_display.h>
+
+/*temp*/
+#include <linux/delay.h>
+
+static void mcde_display_get_native_resolution_default(
+ struct mcde_display_device *ddev, u16 *x_res, u16 *y_res)
+{
+ if (x_res)
+ *x_res = ddev->native_x_res;
+ if (y_res)
+ *y_res = ddev->native_y_res;
+}
+
+static enum mcde_ovly_pix_fmt mcde_display_get_default_pixel_format_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->default_pixel_format;
+}
+
+static void mcde_display_get_physical_size_default(
+ struct mcde_display_device *ddev, u16 *width, u16 *height)
+{
+ if (width)
+ *width = ddev->physical_width;
+ if (height)
+ *height = ddev->physical_height;
+}
+
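+/*
+ * Default power mode handling. For DSI ports the transitions form a
+ * simple chain:
+ *
+ *	OFF <-> STANDBY <-> ON
+ *
+ * OFF -> STANDBY runs platform_enable(), STANDBY -> ON sends DCS
+ * exit_sleep_mode/set_display_on, ON -> STANDBY sends DCS
+ * set_display_off/enter_sleep_mode and STANDBY -> OFF runs
+ * platform_disable().
+ */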
+static int mcde_display_set_power_mode_default(struct mcde_display_device *ddev,
+ enum mcde_display_power_mode power_mode)
+{
+ int ret = 0;
+
+ /* OFF -> STANDBY */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_OFF &&
+ power_mode != MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_enable) {
+ ret = ddev->platform_enable(ddev);
+ if (ret)
+ return ret;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ /* force register settings */
+ if (ddev->port->type == MCDE_PORTTYPE_DPI)
+			ddev->update_flags = UPDATE_FLAG_VIDEO_MODE |
+						UPDATE_FLAG_PIXEL_FORMAT;
+ }
+
+ if (ddev->port->type == MCDE_PORTTYPE_DSI) {
+ /* STANDBY -> ON */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_ON) {
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_EXIT_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_ON, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_ON;
+ } else if (ddev->power_mode == MCDE_DISPLAY_PM_ON &&
+ power_mode <= MCDE_DISPLAY_PM_STANDBY) {
+ /* ON -> STANDBY */
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_SET_DISPLAY_OFF, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = mcde_dsi_dcs_write(ddev->chnl_state,
+ DCS_CMD_ENTER_SLEEP_MODE, NULL, 0);
+ if (ret)
+ return ret;
+
+ ddev->power_mode = MCDE_DISPLAY_PM_STANDBY;
+ }
+ } else if (ddev->port->type == MCDE_PORTTYPE_DPI) {
+ ddev->power_mode = power_mode;
+ } else if (ddev->power_mode != power_mode) {
+ return -EINVAL;
+ }
+
+	/* STANDBY -> OFF */
+ if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY &&
+ power_mode == MCDE_DISPLAY_PM_OFF) {
+ if (ddev->platform_disable) {
+ ret = ddev->platform_disable(ddev);
+ if (ret)
+ return ret;
+ }
+ ddev->power_mode = MCDE_DISPLAY_PM_OFF;
+ }
+
+ mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode);
+
+ return ret;
+}
+
+static inline enum mcde_display_power_mode mcde_display_get_power_mode_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->power_mode;
+}
+
+static inline int mcde_display_try_video_mode_default(
+ struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ /*
+ * DSI video mode:
+ * This function is intended for configuring supported video mode(s).
+ * Overload it into the panel driver file and set up blanking
+ * intervals and pixel clock according to below recommendations.
+ *
+ * vertical blanking parameters vbp, vfp, vsw are given in lines
+ * horizontal blanking parameters hbp, hfp, hsw are given in pixels
+ *
+ * video_mode->pixclock is the time between two pixels (in picoseconds)
+ * The source of the pixel clock is DSI PLL and it shall be set to
+ * meet the requirement
+ *
+ * non-burst mode:
+ * pixel clock (Hz) = (VACT+VBP+VFP+VSA) * (HACT+HBP+HFP+HSA) *
+ * framerate * bpp / num_data_lanes
+ *
+ * burst mode:
+ * pixel clock (Hz) > (VACT+VBP+VFP+VSA) * (HACT+HBP+HFP+HSA) *
+ * framerate * bpp / num_data_lanes * 1.1
+ * (1.1 is a 10% margin needed for burst mode calculations)
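+	 *
+	 * Worked example with assumed values (non-burst): a 480x854
+	 * panel, vsw/vbp/vfp = 1/3/14 lines, hsw/hbp/hfp = 2/8/8
+	 * pixels, 60 fps, 24 bpp on 2 data lanes:
+	 * (854+1+3+14) * (480+2+8+8) * 60 = 26055360 pixels/s, so
+	 * video_mode->pixclock = 10^12 / 26055360 ~= 38380 ps and the
+	 * required lane bit rate is 26055360 * 24 / 2 ~= 313 Mbps.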
+ */
+ return 0;
+}
+
+static int mcde_display_set_video_mode_default(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ int ret;
+ struct mcde_video_mode channel_video_mode;
+
+ if (!video_mode)
+ return -EINVAL;
+
+ ddev->video_mode = *video_mode;
+ channel_video_mode = ddev->video_mode;
+	/* Depends on whether the display or MCDE should rotate */
+ if (ddev->rotation == MCDE_DISPLAY_ROT_90_CCW ||
+ ddev->rotation == MCDE_DISPLAY_ROT_90_CW) {
+ channel_video_mode.xres = ddev->native_x_res;
+ channel_video_mode.yres = ddev->native_y_res;
+ }
+ ret = mcde_chnl_set_video_mode(ddev->chnl_state, &channel_video_mode);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set video mode\n", __func__);
+ return ret;
+ }
+
+ ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE;
+
+ return 0;
+}
+
+static inline void mcde_display_get_video_mode_default(
+ struct mcde_display_device *ddev, struct mcde_video_mode *video_mode)
+{
+ if (video_mode)
+ *video_mode = ddev->video_mode;
+}
+
+static int mcde_display_set_pixel_format_default(
+ struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format)
+{
+ int ret;
+
+ ddev->pixel_format = format;
+ ret = mcde_chnl_set_pixel_format(ddev->chnl_state,
+ ddev->port->pixel_format);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to set pixel format = %d\n",
+ __func__, format);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline enum mcde_ovly_pix_fmt mcde_display_get_pixel_format_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->pixel_format;
+}
+
+
+static int mcde_display_set_rotation_default(struct mcde_display_device *ddev,
+ enum mcde_display_rotation rotation)
+{
+ int ret;
+ u8 param = 0;
+ enum mcde_display_rotation final;
+
+ final = (360 + rotation - ddev->orientation) % 360;
+ ret = mcde_chnl_set_rotation(ddev->chnl_state, final);
+ if (WARN_ON(ret))
+ return ret;
+
+ if (final == MCDE_DISPLAY_ROT_180_CW)
+ param = 0x40; /* Horizontal flip */
+ (void)mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_ADDRESS_MODE,
+ &param, 1);
+
+ ddev->rotation = rotation;
+ ddev->update_flags |= UPDATE_FLAG_ROTATION;
+
+ return 0;
+}
+
+static inline enum mcde_display_rotation mcde_display_get_rotation_default(
+ struct mcde_display_device *ddev)
+{
+ return ddev->rotation;
+}
+
+static int mcde_display_apply_config_default(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ if (!ddev->update_flags)
+ return 0;
+
+ if (ddev->update_flags & UPDATE_FLAG_VIDEO_MODE)
+ mcde_chnl_stop_flow(ddev->chnl_state);
+
+ ret = mcde_chnl_apply(ddev->chnl_state);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to apply to channel\n",
+ __func__);
+ return ret;
+ }
+ ddev->update_flags = 0;
+ ddev->first_update = true;
+
+ return 0;
+}
+
+static int mcde_display_invalidate_area_default(
+ struct mcde_display_device *ddev,
+ struct mcde_rectangle *area)
+{
+ dev_vdbg(&ddev->dev, "%s\n", __func__);
+ if (area) {
+ /* take union of rects */
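+		/*
+		 * E.g. with assumed values: a stored area (0,0,100,100)
+		 * unioned with a dirty area (50,50,100,100) grows to
+		 * (0,0,150,150), given as (x,y,w,h), before the
+		 * clipping below.
+		 */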
+ u16 t;
+ t = min(ddev->update_area.x, area->x);
+ /* note should be > 0 */
+ ddev->update_area.w = max(ddev->update_area.x +
+ ddev->update_area.w,
+ area->x + area->w) - t;
+ ddev->update_area.x = t;
+ t = min(ddev->update_area.y, area->y);
+ ddev->update_area.h = max(ddev->update_area.y +
+ ddev->update_area.h,
+ area->y + area->h) - t;
+ ddev->update_area.y = t;
+		/* TODO: Implement real clipping when partial refresh
+		 * is activated. */
+ ddev->update_area.w = min((u16) ddev->video_mode.xres,
+ (u16) ddev->update_area.w);
+ ddev->update_area.h = min((u16) ddev->video_mode.yres,
+ (u16) ddev->update_area.h);
+ } else {
+ ddev->update_area.x = 0;
+ ddev->update_area.y = 0;
+ ddev->update_area.w = ddev->video_mode.xres;
+ ddev->update_area.h = ddev->video_mode.yres;
+ /* Invalidate_area(ddev, NULL) means reset area to empty
+ * rectangle really. After that the rectangle should grow by
+ * taking an union (above). This means that the code should
+ * really look like below, however the code above is a temp fix
+ * for rotation.
+ * TODO: fix
+ * ddev->update_area.x = ddev->video_mode.xres;
+ * ddev->update_area.y = ddev->video_mode.yres;
+ * ddev->update_area.w = 0;
+ * ddev->update_area.h = 0;
+ */
+ }
+
+ return 0;
+}
+
+static int mcde_display_update_default(struct mcde_display_device *ddev,
+ bool tripple_buffer)
+{
+ int ret = 0;
+
+ ret = mcde_chnl_update(ddev->chnl_state, &ddev->update_area,
+ tripple_buffer);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "%s:Failed to update channel\n", __func__);
+ return ret;
+ }
+ if (ddev->first_update && ddev->on_first_update)
+ ddev->on_first_update(ddev);
+
+ if (ddev->power_mode != MCDE_DISPLAY_PM_ON && ddev->set_power_mode) {
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_ON);
+ if (ret < 0) {
+ dev_warn(&ddev->dev,
+ "%s:Failed to set power mode to on\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ dev_vdbg(&ddev->dev, "Overlay updated, chnl=%d\n", ddev->chnl_id);
+
+ return 0;
+}
+
+static inline int mcde_display_on_first_update_default(
+ struct mcde_display_device *ddev)
+{
+ ddev->first_update = false;
+ return 0;
+}
+
+void mcde_display_init_device(struct mcde_display_device *ddev)
+{
+ /* Setup default callbacks */
+ ddev->get_native_resolution =
+ mcde_display_get_native_resolution_default;
+ ddev->get_default_pixel_format =
+ mcde_display_get_default_pixel_format_default;
+ ddev->get_physical_size = mcde_display_get_physical_size_default;
+ ddev->set_power_mode = mcde_display_set_power_mode_default;
+ ddev->get_power_mode = mcde_display_get_power_mode_default;
+ ddev->try_video_mode = mcde_display_try_video_mode_default;
+ ddev->set_video_mode = mcde_display_set_video_mode_default;
+ ddev->get_video_mode = mcde_display_get_video_mode_default;
+ ddev->set_pixel_format = mcde_display_set_pixel_format_default;
+ ddev->get_pixel_format = mcde_display_get_pixel_format_default;
+ ddev->set_rotation = mcde_display_set_rotation_default;
+ ddev->get_rotation = mcde_display_get_rotation_default;
+ ddev->apply_config = mcde_display_apply_config_default;
+ ddev->invalidate_area = mcde_display_invalidate_area_default;
+ ddev->update = mcde_display_update_default;
+ ddev->on_first_update = mcde_display_on_first_update_default;
+
+ mutex_init(&ddev->display_lock);
+}
+
diff --git a/drivers/video/mcde/mcde_dss.c b/drivers/video/mcde/mcde_dss.c
new file mode 100644
index 00000000000..35a6cbe9540
--- /dev/null
+++ b/drivers/video/mcde/mcde_dss.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE display sub system driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <video/mcde_dss.h>
+
+#define to_overlay(x) container_of(x, struct mcde_overlay, kobj)
+
+static void overlay_release(struct kobject *kobj)
+{
+ struct mcde_overlay *ovly = to_overlay(kobj);
+
+ kfree(ovly);
+}
+
+static struct kobj_type ovly_type = {
+ .release = overlay_release,
+};
+
+static int apply_overlay(struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info, bool force)
+{
+ int ret = 0;
+ if (ovly->ddev->invalidate_area) {
+ /* TODO: transform ovly coord to screen coords (vmode):
+ * add offset
+ */
+ struct mcde_rectangle dirty = info->dirty;
+ mutex_lock(&ovly->ddev->display_lock);
+ ret = ovly->ddev->invalidate_area(ovly->ddev, &dirty);
+ mutex_unlock(&ovly->ddev->display_lock);
+ }
+
+ if (ovly->info.paddr != info->paddr || force)
+ mcde_ovly_set_source_buf(ovly->state, info->paddr);
+
+ if (ovly->info.stride != info->stride || ovly->info.fmt != info->fmt ||
+ force)
+ mcde_ovly_set_source_info(ovly->state, info->stride, info->fmt);
+ if (ovly->info.src_x != info->src_x ||
+ ovly->info.src_y != info->src_y ||
+ ovly->info.w != info->w ||
+ ovly->info.h != info->h || force)
+ mcde_ovly_set_source_area(ovly->state,
+ info->src_x, info->src_y, info->w, info->h);
+ if (ovly->info.dst_x != info->dst_x || ovly->info.dst_y != info->dst_y
+ || ovly->info.dst_z != info->dst_z ||
+ force)
+ mcde_ovly_set_dest_pos(ovly->state,
+ info->dst_x, info->dst_y, info->dst_z);
+
+ mcde_ovly_apply(ovly->state);
+ ovly->info = *info;
+
+ return ret;
+}
+
+/* MCDE DSS operations */
+
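+/*
+ * Typical client call order (sketch, error handling omitted):
+ *
+ *	mcde_dss_open_channel(ddev);
+ *	mcde_dss_enable_display(ddev);
+ *	ovly = mcde_dss_create_overlay(ddev, &info);
+ *	mcde_dss_enable_overlay(ovly);
+ *	mcde_dss_apply_channel(ddev);
+ *	mcde_dss_update_overlay(ovly, false);
+ */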
+int mcde_dss_open_channel(struct mcde_display_device *ddev)
+{
+ int ret = 0;
+ struct mcde_chnl_state *chnl;
+
+ mutex_lock(&ddev->display_lock);
+ /* Acquire MCDE resources */
+ chnl = mcde_chnl_get(ddev->chnl_id, ddev->fifo, ddev->port);
+ if (IS_ERR(chnl)) {
+ ret = PTR_ERR(chnl);
+ dev_warn(&ddev->dev, "Failed to acquire MCDE channel\n");
+ goto chnl_get_failed;
+ }
+ ddev->chnl_state = chnl;
+chnl_get_failed:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_open_channel);
+
+void mcde_dss_close_channel(struct mcde_display_device *ddev)
+{
+ mutex_lock(&ddev->display_lock);
+ mcde_chnl_put(ddev->chnl_state);
+ ddev->chnl_state = NULL;
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_close_channel);
+
+int mcde_dss_enable_display(struct mcde_display_device *ddev)
+{
+ int ret;
+
+ if (ddev->enabled)
+ return 0;
+
+ mutex_lock(&ddev->display_lock);
+ mcde_chnl_enable(ddev->chnl_state);
+
+ /* Initiate display communication */
+ ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY);
+ if (ret < 0) {
+ dev_warn(&ddev->dev, "Failed to initialize display\n");
+ goto display_failed;
+ }
+
+ ret = ddev->set_rotation(ddev, ddev->get_rotation(ddev));
+ if (ret < 0)
+ dev_warn(&ddev->dev, "Failed to set rotation\n");
+
+ dev_dbg(&ddev->dev, "Display enabled, chnl=%d\n",
+ ddev->chnl_id);
+ ddev->enabled = true;
+ mutex_unlock(&ddev->display_lock);
+
+ return 0;
+
+display_failed:
+ mcde_chnl_disable(ddev->chnl_state);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_enable_display);
+
+void mcde_dss_disable_display(struct mcde_display_device *ddev)
+{
+ if (!ddev->enabled)
+ return;
+
+ /* TODO: Disable overlays */
+ mutex_lock(&ddev->display_lock);
+
+ mcde_chnl_stop_flow(ddev->chnl_state);
+
+ (void)ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF);
+
+ mcde_chnl_disable(ddev->chnl_state);
+
+ ddev->enabled = false;
+ mutex_unlock(&ddev->display_lock);
+
+ dev_dbg(&ddev->dev, "Display disabled, chnl=%d\n", ddev->chnl_id);
+}
+EXPORT_SYMBOL(mcde_dss_disable_display);
+
+int mcde_dss_apply_channel(struct mcde_display_device *ddev)
+{
+ int ret;
+ if (!ddev->apply_config)
+ return -EINVAL;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->apply_config(ddev);
+ mutex_unlock(&ddev->display_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_apply_channel);
+
+struct mcde_overlay *mcde_dss_create_overlay(struct mcde_display_device *ddev,
+ struct mcde_overlay_info *info)
+{
+ struct mcde_overlay *ovly;
+
+ ovly = kzalloc(sizeof(struct mcde_overlay), GFP_KERNEL);
+ if (!ovly)
+ return NULL;
+
+ kobject_init(&ovly->kobj, &ovly_type); /* Local ref */
+ kobject_get(&ovly->kobj); /* Creator ref */
+ INIT_LIST_HEAD(&ovly->list);
+ mutex_lock(&ddev->display_lock);
+	list_add(&ovly->list, &ddev->ovlys);
+ mutex_unlock(&ddev->display_lock);
+ ovly->info = *info;
+ ovly->ddev = ddev;
+
+ return ovly;
+}
+EXPORT_SYMBOL(mcde_dss_create_overlay);
+
+void mcde_dss_destroy_overlay(struct mcde_overlay *ovly)
+{
+ list_del(&ovly->list);
+ if (ovly->state)
+ mcde_dss_disable_overlay(ovly);
+ kobject_put(&ovly->kobj);
+}
+EXPORT_SYMBOL(mcde_dss_destroy_overlay);
+
+int mcde_dss_enable_overlay(struct mcde_overlay *ovly)
+{
+ int ret;
+
+ if (!ovly->ddev->chnl_state)
+ return -EINVAL;
+
+ if (!ovly->state) {
+ struct mcde_ovly_state *state;
+ state = mcde_ovly_get(ovly->ddev->chnl_state);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ dev_warn(&ovly->ddev->dev,
+ "Failed to acquire overlay\n");
+ return ret;
+ }
+ ovly->state = state;
+ }
+
+ apply_overlay(ovly, &ovly->info, true);
+
+ dev_vdbg(&ovly->ddev->dev, "Overlay enabled, chnl=%d\n",
+ ovly->ddev->chnl_id);
+ return 0;
+}
+EXPORT_SYMBOL(mcde_dss_enable_overlay);
+
+int mcde_dss_apply_overlay(struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info)
+{
+ if (info == NULL)
+ info = &ovly->info;
+ return apply_overlay(ovly, info, false);
+}
+EXPORT_SYMBOL(mcde_dss_apply_overlay);
+
+void mcde_dss_disable_overlay(struct mcde_overlay *ovly)
+{
+ if (!ovly->state)
+ return;
+
+ mcde_ovly_put(ovly->state);
+
+ dev_dbg(&ovly->ddev->dev, "Overlay disabled, chnl=%d\n",
+ ovly->ddev->chnl_id);
+
+ ovly->state = NULL;
+}
+EXPORT_SYMBOL(mcde_dss_disable_overlay);
+
+int mcde_dss_update_overlay(struct mcde_overlay *ovly, bool tripple_buffer)
+{
+ int ret;
+ dev_vdbg(&ovly->ddev->dev, "Overlay update, chnl=%d\n",
+ ovly->ddev->chnl_id);
+
+ if (!ovly->state || !ovly->ddev->update || !ovly->ddev->invalidate_area)
+ return -EINVAL;
+
+ mutex_lock(&ovly->ddev->display_lock);
+ /* Do not perform an update if power mode is off */
+ if (ovly->ddev->get_power_mode(ovly->ddev) == MCDE_DISPLAY_PM_OFF) {
+ ret = 0;
+ goto power_mode_off;
+ }
+
+ ret = ovly->ddev->update(ovly->ddev, tripple_buffer);
+ if (ret)
+ goto update_failed;
+
+ ret = ovly->ddev->invalidate_area(ovly->ddev, NULL);
+
+power_mode_off:
+update_failed:
+ mutex_unlock(&ovly->ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_update_overlay);
+
+void mcde_dss_get_overlay_info(struct mcde_overlay *ovly,
+	struct mcde_overlay_info *info)
+{
+ if (info)
+ *info = ovly->info;
+}
+EXPORT_SYMBOL(mcde_dss_get_overlay_info);
+
+void mcde_dss_get_native_resolution(struct mcde_display_device *ddev,
+ u16 *x_res, u16 *y_res)
+{
+ u16 x_tmp, y_tmp;
+ mutex_lock(&ddev->display_lock);
+ ddev->get_native_resolution(ddev, &x_tmp, &y_tmp);
+ if (ddev->orientation == MCDE_DISPLAY_ROT_90_CW ||
+ ddev->orientation == MCDE_DISPLAY_ROT_90_CCW) {
+ *x_res = y_tmp;
+ *y_res = x_tmp;
+ } else {
+ *x_res = x_tmp;
+ *y_res = y_tmp;
+ }
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_get_native_resolution);
+
+enum mcde_ovly_pix_fmt mcde_dss_get_default_pixel_format(
+ struct mcde_display_device *ddev)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->get_default_pixel_format(ddev);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_get_default_pixel_format);
+
+void mcde_dss_get_physical_size(struct mcde_display_device *ddev,
+ u16 *physical_width, u16 *physical_height)
+{
+ mutex_lock(&ddev->display_lock);
+ ddev->get_physical_size(ddev, physical_width, physical_height);
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_get_physical_size);
+
+int mcde_dss_try_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->try_video_mode(ddev, video_mode);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_try_video_mode);
+
+int mcde_dss_set_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *vmode)
+{
+ int ret = 0;
+ struct mcde_video_mode old_vmode;
+
+ mutex_lock(&ddev->display_lock);
+ /* Do not perform set_video_mode if power mode is off */
+ if (ddev->get_power_mode(ddev) == MCDE_DISPLAY_PM_OFF)
+ goto power_mode_off;
+
+ ddev->get_video_mode(ddev, &old_vmode);
+ if (memcmp(vmode, &old_vmode, sizeof(old_vmode)) == 0)
+ goto same_video_mode;
+
+ ret = ddev->set_video_mode(ddev, vmode);
+ if (ret)
+ goto set_video_mode_failed;
+
+ if (ddev->invalidate_area)
+ ret = ddev->invalidate_area(ddev, NULL);
+power_mode_off:
+same_video_mode:
+set_video_mode_failed:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_set_video_mode);
+
+void mcde_dss_get_video_mode(struct mcde_display_device *ddev,
+ struct mcde_video_mode *video_mode)
+{
+ mutex_lock(&ddev->display_lock);
+ ddev->get_video_mode(ddev, video_mode);
+ mutex_unlock(&ddev->display_lock);
+}
+EXPORT_SYMBOL(mcde_dss_get_video_mode);
+
+int mcde_dss_set_pixel_format(struct mcde_display_device *ddev,
+ enum mcde_ovly_pix_fmt pix_fmt)
+{
+ enum mcde_ovly_pix_fmt old_pix_fmt;
+ int ret;
+
+ mutex_lock(&ddev->display_lock);
+ old_pix_fmt = ddev->get_pixel_format(ddev);
+ if (old_pix_fmt == pix_fmt) {
+ ret = 0;
+ goto same_pixel_format;
+ }
+
+ ret = ddev->set_pixel_format(ddev, pix_fmt);
+
+same_pixel_format:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_set_pixel_format);
+
+int mcde_dss_get_pixel_format(struct mcde_display_device *ddev)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->get_pixel_format(ddev);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_get_pixel_format);
+
+int mcde_dss_set_rotation(struct mcde_display_device *ddev,
+ enum mcde_display_rotation rotation)
+{
+ int ret;
+ enum mcde_display_rotation old_rotation;
+
+ mutex_lock(&ddev->display_lock);
+ old_rotation = ddev->get_rotation(ddev);
+ if (old_rotation == rotation) {
+ ret = 0;
+ goto same_rotation;
+ }
+
+ ret = ddev->set_rotation(ddev, rotation);
+same_rotation:
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_set_rotation);
+
+enum mcde_display_rotation mcde_dss_get_rotation(
+ struct mcde_display_device *ddev)
+{
+ int ret;
+ mutex_lock(&ddev->display_lock);
+ ret = ddev->get_rotation(ddev);
+ mutex_unlock(&ddev->display_lock);
+ return ret;
+}
+EXPORT_SYMBOL(mcde_dss_get_rotation);
+
+int __init mcde_dss_init(void)
+{
+ return 0;
+}
+
+void mcde_dss_exit(void)
+{
+}
+
diff --git a/drivers/video/mcde/mcde_fb.c b/drivers/video/mcde/mcde_fb.c
new file mode 100644
index 00000000000..7d877ec3e4f
--- /dev/null
+++ b/drivers/video/mcde/mcde_fb.c
@@ -0,0 +1,898 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE frame buffer driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/hwmem.h>
+#include <linux/io.h>
+
+#include <linux/console.h>
+
+#include <video/mcde_fb.h>
+
+#define MCDE_FB_BPP_MAX 16
+#define MCDE_FB_VXRES_MAX 1920
+#define MCDE_FB_VYRES_MAX 2160
+
+static struct fb_ops fb_ops;
+
+struct pix_fmt_info {
+ enum mcde_ovly_pix_fmt pix_fmt;
+
+ u32 bpp;
+ struct fb_bitfield r;
+ struct fb_bitfield g;
+ struct fb_bitfield b;
+ struct fb_bitfield a;
+ u32 nonstd;
+};
+
+static struct pix_fmt_info pix_fmt_map[] = {
+ {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGB565,
+ .bpp = 16,
+ .r = { .offset = 11, .length = 5 },
+ .g = { .offset = 5, .length = 6 },
+ .b = { .offset = 0, .length = 5 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBA5551,
+ .bpp = 16,
+ .r = { .offset = 11, .length = 5 },
+ .g = { .offset = 6, .length = 5 },
+ .b = { .offset = 1, .length = 5 },
+ .a = { .offset = 0, .length = 1 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBA4444,
+ .bpp = 16,
+ .r = { .offset = 12, .length = 4 },
+ .g = { .offset = 8, .length = 4 },
+ .b = { .offset = 4, .length = 4 },
+ .a = { .offset = 0, .length = 4 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_YCbCr422,
+ .bpp = 16,
+ .nonstd = MCDE_OVLYPIXFMT_YCbCr422,
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGB888,
+ .bpp = 24,
+ .r = { .offset = 16, .length = 8 },
+ .g = { .offset = 8, .length = 8 },
+ .b = { .offset = 0, .length = 8 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBA8888,
+ .bpp = 32,
+ .r = { .offset = 16, .length = 8 },
+ .g = { .offset = 8, .length = 8 },
+ .b = { .offset = 0, .length = 8 },
+ .a = { .offset = 24, .length = 8 },
+ }, {
+ .pix_fmt = MCDE_OVLYPIXFMT_RGBX8888,
+ .bpp = 32,
+ .r = { .offset = 16, .length = 8 },
+ .g = { .offset = 8, .length = 8 },
+ .b = { .offset = 0, .length = 8 },
+ }
+
+};
+
+static struct platform_device mcde_fb_device = {
+ .name = "mcde_fb",
+ .id = -1,
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void early_suspend(struct early_suspend *data)
+{
+ int i;
+ struct mcde_fb *mfb =
+ container_of(data, struct mcde_fb, early_suspend);
+
+ console_lock();
+ for (i = 0; i < mfb->num_ovlys; i++) {
+		if (mfb->ovlys[i] && mfb->ovlys[i]->ddev &&
+				!mfb->ovlys[i]->ddev->stay_alive)
+			mcde_dss_disable_display(mfb->ovlys[i]->ddev);
+ }
+ console_unlock();
+}
+
+static void late_resume(struct early_suspend *data)
+{
+ int i;
+ struct mcde_fb *mfb =
+ container_of(data, struct mcde_fb, early_suspend);
+
+ console_lock();
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ if (mfb->ovlys[i]) {
+ struct mcde_overlay *ovly = mfb->ovlys[i];
+ (void) mcde_dss_enable_display(ovly->ddev);
+ }
+ }
+ console_unlock();
+}
+#endif
+
+/* Helpers */
+
+static struct pix_fmt_info *find_pix_fmt_info(enum mcde_ovly_pix_fmt pix_fmt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) {
+ if (pix_fmt_map[i].pix_fmt == pix_fmt)
+ return &pix_fmt_map[i];
+ }
+ return NULL;
+}
+
+static bool bitfield_cmp(struct fb_bitfield *bf1, struct fb_bitfield *bf2)
+{
+ return bf1->offset == bf2->offset &&
+ bf1->length == bf2->length &&
+ bf1->msb_right == bf2->msb_right;
+}
+
+static struct pix_fmt_info *var_to_pix_fmt_info(struct fb_var_screeninfo *var)
+{
+ int i;
+ struct pix_fmt_info *info;
+
+ if (var->nonstd)
+ return find_pix_fmt_info(var->nonstd);
+
+ for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) {
+ info = &pix_fmt_map[i];
+ if (info->bpp == var->bits_per_pixel &&
+ bitfield_cmp(&info->r, &var->red) &&
+ bitfield_cmp(&info->g, &var->green) &&
+ bitfield_cmp(&info->b, &var->blue) &&
+ bitfield_cmp(&info->a, &var->transp))
+ return info;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) {
+ info = &pix_fmt_map[i];
+ if (var->bits_per_pixel == info->bpp)
+ return info;
+ }
+
+ return NULL;
+}
+
+static void pix_fmt_info_to_var(struct pix_fmt_info *pix_fmt_info,
+ struct fb_var_screeninfo *var)
+{
+ var->bits_per_pixel = pix_fmt_info->bpp;
+ var->nonstd = pix_fmt_info->nonstd;
+ var->red = pix_fmt_info->r;
+ var->green = pix_fmt_info->g;
+ var->blue = pix_fmt_info->b;
+ var->transp = pix_fmt_info->a;
+}
+
+static int init_var_fmt(struct fb_var_screeninfo *var,
+ u16 w, u16 h, u16 vw, u16 vh, enum mcde_ovly_pix_fmt pix_fmt,
+ u32 rotate)
+{
+ struct pix_fmt_info *info;
+
+ info = find_pix_fmt_info(pix_fmt);
+ if (!info)
+ return -EINVAL;
+
+ var->bits_per_pixel = info->bpp;
+ var->nonstd = info->nonstd;
+ var->red = info->r;
+ var->green = info->g;
+ var->blue = info->b;
+ var->transp = info->a;
+ var->grayscale = false;
+
+ var->xres = w;
+ var->yres = h;
+ var->xres_virtual = vw;
+ var->yres_virtual = vh;
+ var->xoffset = 0;
+ var->yoffset = 0;
+ var->activate = FB_ACTIVATE_NOW;
+ var->rotate = rotate;
+
+ return 0;
+}
+
+static int reallocate_fb_mem(struct fb_info *fbi, u32 size)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+ void *vaddr;
+ struct hwmem_alloc *alloc;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t num_mem_chunks = 1;
+ int name;
+
+ size = PAGE_ALIGN(size);
+
+ if (size == fbi->screen_size)
+ return 0;
+
+/* TODO: Remove once hwmem has support for defragmentation */
+#ifdef CONFIG_MCDE_FB_AVOID_REALLOC
+ if (!mfb->alloc) {
+ u32 old_size = size;
+
+ size = MCDE_FB_BPP_MAX / 8 * MCDE_FB_VXRES_MAX *
+ MCDE_FB_VYRES_MAX;
+#endif
+
+ alloc = hwmem_alloc(size, HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED,
+ (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE |
+ HWMEM_ACCESS_IMPORT),
+ HWMEM_MEM_CONTIGUOUS_SYS);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ name = hwmem_get_name(alloc);
+ if (name < 0) {
+ hwmem_release(alloc);
+ return name;
+ }
+
+ if (mfb->alloc) {
+ hwmem_kunmap(mfb->alloc);
+ hwmem_unpin(mfb->alloc);
+ hwmem_release(mfb->alloc);
+ }
+
+ (void)hwmem_pin(alloc, &mem_chunk, &num_mem_chunks);
+
+ vaddr = hwmem_kmap(alloc);
+ if (vaddr == NULL) {
+ hwmem_unpin(alloc);
+ hwmem_release(alloc);
+ return -ENOMEM;
+ }
+
+ mfb->alloc = alloc;
+ mfb->alloc_name = name;
+
+ fbi->screen_base = vaddr;
+ fbi->fix.smem_start = mem_chunk.paddr;
+
+#ifdef CONFIG_MCDE_FB_AVOID_REALLOC
+ size = old_size;
+ }
+#endif
+
+ fbi->screen_size = size;
+ fbi->fix.smem_len = size;
+
+ return 0;
+}
+
+static void free_fb_mem(struct fb_info *fbi)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ if (mfb->alloc) {
+ hwmem_kunmap(mfb->alloc);
+ hwmem_unpin(mfb->alloc);
+ hwmem_release(mfb->alloc);
+ mfb->alloc = NULL;
+ mfb->alloc_name = 0;
+
+ fbi->fix.smem_start = 0;
+ fbi->fix.smem_len = 0;
+ fbi->screen_base = NULL;
+ fbi->screen_size = 0;
+ }
+}
+
+static void init_fb(struct fb_info *fbi)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ strlcpy(fbi->fix.id, "mcde_fb", sizeof(fbi->fix.id));
+ fbi->fix.type = FB_TYPE_PACKED_PIXELS;
+ fbi->fix.visual = FB_VISUAL_DIRECTCOLOR;
+ fbi->fix.xpanstep = 1;
+ fbi->fix.ypanstep = 1;
+ fbi->flags = FBINFO_HWACCEL_DISABLED;
+ fbi->fbops = &fb_ops;
+ fbi->pseudo_palette = &mfb->pseudo_palette[0];
+}
+
+static void get_ovly_info(struct fb_info *fbi, struct mcde_overlay *ovly,
+ struct mcde_overlay_info *info)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ memset(info, 0, sizeof(*info));
+ info->paddr = fbi->fix.smem_start +
+ fbi->fix.line_length * fbi->var.yoffset;
+ info->vaddr = (u32 *)(fbi->screen_base +
+ fbi->fix.line_length * fbi->var.yoffset);
+ /* TODO: move mem check to check_var/pan_display */
+ if (info->paddr + fbi->fix.line_length * fbi->var.yres >
+ fbi->fix.smem_start + fbi->fix.smem_len) {
+ info->paddr = fbi->fix.smem_start;
+ info->vaddr = (u32 *)fbi->screen_base;
+ }
+ info->fmt = mfb->pix_fmt;
+ info->stride = fbi->fix.line_length;
+ if (ovly) {
+ info->src_x = ovly->info.src_x;
+ info->src_y = ovly->info.src_y;
+ info->dst_x = ovly->info.dst_x;
+ info->dst_y = ovly->info.dst_y;
+ info->dst_z = 1;
+ } else {
+ info->src_x = 0;
+ info->src_y = 0;
+ info->dst_x = 0;
+ info->dst_y = 0;
+ info->dst_z = 1;
+ }
+ info->w = fbi->var.xres;
+ info->h = fbi->var.yres;
+ info->dirty.x = 0;
+ info->dirty.y = 0;
+ info->dirty.w = fbi->var.xres;
+ info->dirty.h = fbi->var.yres;
+}
+
+void vmode_to_var(struct mcde_video_mode *video_mode,
+ struct fb_var_screeninfo *var)
+{
+ /* TODO: use only 1 vbp and 1 vfp */
+ var->xres = video_mode->xres;
+ var->yres = video_mode->yres;
+ var->pixclock = video_mode->pixclock;
+ var->upper_margin = video_mode->vbp;
+ var->lower_margin = video_mode->vfp;
+ var->vsync_len = video_mode->vsw;
+ var->left_margin = video_mode->hbp;
+ var->right_margin = video_mode->hfp;
+ var->hsync_len = video_mode->hsw;
+ var->vmode &= ~FB_VMODE_INTERLACED;
+ var->vmode |= video_mode->interlaced ?
+ FB_VMODE_INTERLACED : FB_VMODE_NONINTERLACED;
+}
+
+void var_to_vmode(struct fb_var_screeninfo *var,
+ struct mcde_video_mode *video_mode)
+{
+ video_mode->xres = var->xres;
+ video_mode->yres = var->yres;
+ video_mode->pixclock = var->pixclock;
+ video_mode->vbp = var->upper_margin;
+ video_mode->vfp = var->lower_margin;
+ video_mode->vsw = var->vsync_len;
+ video_mode->hbp = var->left_margin;
+ video_mode->hfp = var->right_margin;
+ video_mode->hsw = var->hsync_len;
+ video_mode->interlaced = (var->vmode & FB_VMODE_INTERLACED) ==
+ FB_VMODE_INTERLACED;
+}
+
+enum mcde_display_rotation var_to_rotation(struct fb_var_screeninfo *var)
+{
+ enum mcde_display_rotation rot;
+
+ switch (var->rotate) {
+ case FB_ROTATE_UR:
+ rot = MCDE_DISPLAY_ROT_0;
+ break;
+ case FB_ROTATE_CW:
+ rot = MCDE_DISPLAY_ROT_90_CW;
+ break;
+ case FB_ROTATE_UD:
+ rot = MCDE_DISPLAY_ROT_180_CW;
+ break;
+ case FB_ROTATE_CCW:
+ rot = MCDE_DISPLAY_ROT_90_CCW;
+ break;
+ default:
+ rot = MCDE_DISPLAY_ROT_0;
+ break;
+ }
+ dev_vdbg(&mcde_fb_device.dev, "var_rot: %d -> mcde_rot: %d\n",
+ var->rotate, rot);
+ return rot;
+}
+
+static struct mcde_display_device *fb_to_display(struct fb_info *fbi)
+{
+ int i;
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ if (mfb->ovlys[i])
+ return mfb->ovlys[i]->ddev;
+ }
+ return NULL;
+}
+
+static int check_var(struct fb_var_screeninfo *var, struct fb_info *fbi,
+ struct mcde_display_device *ddev)
+{
+ int ret;
+ u16 w = -1, h = -1;
+ struct mcde_video_mode vmode;
+ struct pix_fmt_info *fmtinfo;
+
+ /* TODO: check sizes/offsets/memory validity */
+
+ /* Device physical size */
+ mcde_dss_get_physical_size(ddev, &w, &h);
+ var->width = w;
+ var->height = h;
+
+ /* Rotation */
+ if (var->rotate > 3) {
+ dev_info(&(ddev->dev), "check_var failed var->rotate\n");
+ return -EINVAL;
+ }
+
+ /* Video mode */
+ var_to_vmode(var, &vmode);
+ ret = mcde_dss_try_video_mode(ddev, &vmode);
+ if (ret < 0) {
+ dev_vdbg(&(ddev->dev), "check_var failed "
+ "mcde_dss_try_video_mode with ret = %d\n", ret);
+ return ret;
+ }
+ vmode_to_var(&vmode, var);
+
+ /* Pixel format */
+ fmtinfo = var_to_pix_fmt_info(var);
+ if (!fmtinfo) {
+ dev_vdbg(&(ddev->dev), "check_var failed fmtinfo\n");
+ return -EINVAL;
+ }
+ pix_fmt_info_to_var(fmtinfo, var);
+
+ /* Not used */
+ var->grayscale = 0;
+ var->sync = 0;
+
+ return 0;
+}
+
+static int apply_var(struct fb_info *fbi, struct mcde_display_device *ddev)
+{
+ int ret, i;
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+ struct fb_var_screeninfo *var;
+ struct mcde_video_mode vmode;
+ struct pix_fmt_info *fmt;
+ u32 line_len, size;
+
+ if (!ddev)
+ return -ENODEV;
+
+ dev_vdbg(&(ddev->dev), "%s\n", __func__);
+
+ var = &fbi->var;
+
+ ddev->check_transparency = 60;
+
+ /* Reallocate memory */
+ line_len = (fbi->var.bits_per_pixel * var->xres_virtual) / 8;
+ line_len = ALIGN(line_len, MCDE_BUF_LINE_ALIGMENT);
+ size = line_len * var->yres_virtual;
+ ret = reallocate_fb_mem(fbi, size);
+ if (ret) {
+ dev_vdbg(&(ddev->dev), "apply_var failed to "
+ "reallocate fb mem with size = %d\n", size);
+ return ret;
+ }
+ fbi->fix.line_length = line_len;
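+ /*
+ * Illustrative example: a 32 bpp, 800x960 virtual mode gives
+ * line_len = 32 * 800 / 8 = 3200 bytes (then rounded up to
+ * MCDE_BUF_LINE_ALIGMENT) and size = 3200 * 960 = 3072000 bytes,
+ * which reallocate_fb_mem() page-aligns.
+ */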
+
+ if (ddev->fictive)
+ goto apply_var_end;
+
+ /* Apply pixel format */
+ fmt = var_to_pix_fmt_info(var);
+ if (!fmt)
+ return -EINVAL;
+ mfb->pix_fmt = fmt->pix_fmt;
+
+ /* Apply rotation */
+ mcde_dss_set_rotation(ddev, var_to_rotation(var));
+ /* Apply video mode */
+ memset(&vmode, 0, sizeof(struct mcde_video_mode));
+ var_to_vmode(var, &vmode);
+ ret = mcde_dss_set_video_mode(ddev, &vmode);
+ if (ret)
+ return ret;
+
+ mcde_dss_apply_channel(ddev);
+
+ /* Apply overlay info */
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ struct mcde_overlay *ovly = mfb->ovlys[i];
+ struct mcde_overlay_info info;
+ int num_buffers;
+
+ get_ovly_info(fbi, ovly, &info);
+ (void) mcde_dss_apply_overlay(ovly, &info);
+
+ num_buffers = var->yres_virtual / var->yres;
+ mcde_dss_update_overlay(ovly, num_buffers == 3);
+ }
+
+apply_var_end:
+ return 0;
+}
+
+/* FB ops */
+
+static int mcde_fb_open(struct fb_info *fbi, int user)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+ return 0;
+}
+
+static int mcde_fb_release(struct fb_info *fbi, int user)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+ return 0;
+}
+
+static int mcde_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
+{
+ struct mcde_display_device *ddev = fb_to_display(fbi);
+
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (!ddev) {
+ printk(KERN_ERR "mcde_fb_check_var failed !ddev\n");
+ return -ENODEV;
+ }
+
+ return check_var(var, fbi, ddev);
+}
+
+static int mcde_fb_set_par(struct fb_info *fbi)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+ struct mcde_display_device *ddev = fb_to_display(fbi);
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (mfb->ovlys[0]->state == NULL &&
+ ddev->fictive == false) {
+ printk(KERN_INFO "%s() - Enable fb %p\n",
+ __func__,
+ mfb->ovlys[0]);
+ mcde_dss_enable_overlay(mfb->ovlys[0]);
+ }
+
+ return apply_var(fbi, ddev);
+}
+
+static int mcde_fb_blank(int blank, struct fb_info *fbi)
+{
+ int ret = 0;
+ struct mcde_display_device *ddev = fb_to_display(fbi);
+
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (ddev->fictive)
+ goto mcde_fb_blank_end;
+
+ switch (blank) {
+ case FB_BLANK_NORMAL:
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ mcde_dss_disable_display(ddev);
+ break;
+ case FB_BLANK_UNBLANK:
+ ret = mcde_dss_enable_display(ddev);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+mcde_fb_blank_end:
+ return ret;
+}
+
+static int mcde_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (var->xoffset == fbi->var.xoffset &&
+ var->yoffset == fbi->var.yoffset)
+ return 0;
+
+ fbi->var.xoffset = var->xoffset;
+ fbi->var.yoffset = var->yoffset;
+ return apply_var(fbi, fb_to_display(fbi));
+}
+
+static void mcde_fb_rotate(struct fb_info *fbi, int rotate)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+}
+
+static int mcde_fb_ioctl(struct fb_info *fbi, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mcde_fb *mfb = to_mcde_fb(fbi);
+
+ if (cmd == MCDE_GET_BUFFER_NAME_IOC)
+ return mfb->alloc_name;
+
+ return -EINVAL;
+}
+
+static int mcde_fb_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue, unsigned int transp,
+ struct fb_info *fbi)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ if (regno >= 256)
+ return 1;
+
+ if (regno < 17) {
+ u32 pseudo_val;
+ u32 r, g, b;
+
+ if (fbi->var.bits_per_pixel > 16) {
+ r = red >> 8;
+ g = green >> 8;
+ b = blue >> 8;
+ } else if (fbi->var.bits_per_pixel == 16) {
+ r = red >> (3 + 8);
+ g = green >> (2 + 8);
+ b = blue >> (3 + 8);
+ } else if (fbi->var.bits_per_pixel == 15) {
+ r = red >> (3 + 8);
+ g = green >> (3 + 8);
+ b = blue >> (3 + 8);
+ } else
+ r = b = g = (regno & 15);
+ pseudo_val = r << fbi->var.red.offset;
+ pseudo_val |= g << fbi->var.green.offset;
+ pseudo_val |= b << fbi->var.blue.offset;
+
+ ((u32 *)fbi->pseudo_palette)[regno] = pseudo_val;
+ }
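+ /*
+ * Illustrative example: for RGB565 (bits_per_pixel == 16) with
+ * red.offset == 11, a full-intensity red (red == 0xffff) gives
+ * r = 0xffff >> (3 + 8) = 0x1f and
+ * pseudo_val = 0x1f << 11 = 0xf800.
+ */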
+
+ return 0;
+}
+
+static int mcde_fb_setcmap(struct fb_cmap *cmap, struct fb_info *fbi)
+{
+ dev_vdbg(fbi->dev, "%s\n", __func__);
+
+ /* Nothing to see here, move along */
+ return 0;
+}
+
+static struct fb_ops fb_ops = {
+ /* creg, cmap */
+ .owner = THIS_MODULE,
+ .fb_open = mcde_fb_open,
+ .fb_release = mcde_fb_release,
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = mcde_fb_check_var,
+ .fb_set_par = mcde_fb_set_par,
+ .fb_blank = mcde_fb_blank,
+ .fb_pan_display = mcde_fb_pan_display,
+ .fb_rotate = mcde_fb_rotate,
+ .fb_ioctl = mcde_fb_ioctl,
+ .fb_setcolreg = mcde_fb_setcolreg,
+ .fb_setcmap = mcde_fb_setcmap,
+};
+
+/* FB driver */
+
+struct fb_info *mcde_fb_create(struct mcde_display_device *ddev,
+ u16 w, u16 h, u16 vw, u16 vh, enum mcde_ovly_pix_fmt pix_fmt,
+ u32 rotate)
+{
+ int ret = 0;
+ struct fb_info *fbi;
+ struct mcde_fb *mfb;
+ struct mcde_overlay *ovly = NULL;
+ struct mcde_overlay_info ovly_info;
+
+ dev_vdbg(&ddev->dev, "%s\n", __func__);
+ if (!ddev->initialized) {
+ dev_warn(&ddev->dev, "%s: Device not initialized\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Init fb */
+ fbi = framebuffer_alloc(sizeof(struct mcde_fb), &mcde_fb_device.dev);
+ if (fbi == NULL) {
+ ret = -ENOMEM;
+ goto fb_alloc_failed;
+ }
+ init_fb(fbi);
+ mfb = to_mcde_fb(fbi);
+
+ if (ddev->fictive == false) {
+ ret = mcde_dss_open_channel(ddev);
+ if (ret)
+ goto channel_open_failed;
+
+ ret = mcde_dss_enable_display(ddev);
+ if (ret)
+ goto display_enable_failed;
+ }
+
+ /* Prepare var and allocate frame buffer memory */
+ init_var_fmt(&fbi->var, w, h, vw, vh, pix_fmt, rotate);
+ check_var(&fbi->var, fbi, ddev);
+ ret = apply_var(fbi, ddev);
+ if (ret)
+ goto apply_var_failed;
+
+ if (ddev->fictive == false)
+ mcde_dss_set_pixel_format(ddev, ddev->port->pixel_format);
+
+ /* Setup overlay */
+ get_ovly_info(fbi, NULL, &ovly_info);
+ ovly = mcde_dss_create_overlay(ddev, &ovly_info);
+ if (!ovly) {
+ ret = -ENOMEM; /* PTR_ERR(NULL) would yield 0 */
+ goto ovly_alloc_failed;
+ }
+ mfb->ovlys[0] = ovly;
+ mfb->num_ovlys = 1;
+
+ if (ddev->fictive == false) {
+ ret = mcde_dss_enable_overlay(ovly);
+ if (ret)
+ goto ovly_enable_failed;
+ }
+
+ mfb->id = ddev->id;
+
+ /* Register framebuffer */
+ ret = register_framebuffer(fbi);
+ if (ret)
+ goto fb_register_failed;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret)
+ dev_warn(&ddev->dev, "%s: Failed to allocate color map memory\n",
+ __func__);
+
+ ddev->fbi = fbi;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ if (ddev->fictive == false) {
+ mfb->early_suspend.level =
+ EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ mfb->early_suspend.suspend = early_suspend;
+ mfb->early_suspend.resume = late_resume;
+ register_early_suspend(&mfb->early_suspend);
+ }
+#endif
+
+ goto out;
+fb_register_failed:
+ mcde_dss_disable_overlay(ovly);
+ovly_enable_failed:
+ mcde_dss_destroy_overlay(ovly);
+ovly_alloc_failed:
+ free_fb_mem(fbi);
+apply_var_failed:
+ mcde_dss_disable_display(ddev);
+display_enable_failed:
+ mcde_dss_close_channel(ddev);
+channel_open_failed:
+ framebuffer_release(fbi);
+ fbi = NULL;
+fb_alloc_failed:
+out:
+ return ret ? ERR_PTR(ret) : fbi;
+}
+EXPORT_SYMBOL(mcde_fb_create);
+
+int mcde_fb_attach_overlay(struct fb_info *fb_info, struct mcde_overlay *ovl)
+{
+ /* TODO: Attach extra overlay targets */
+ return -EINVAL;
+}
+
+void mcde_fb_destroy(struct mcde_display_device *dev)
+{
+ struct mcde_fb *mfb;
+ int i;
+
+ dev_vdbg(&dev->dev, "%s\n", __func__);
+
+ if (dev->fictive == false) {
+ mcde_dss_disable_display(dev);
+ mcde_dss_close_channel(dev);
+ }
+
+ mfb = to_mcde_fb(dev->fbi);
+ for (i = 0; i < mfb->num_ovlys; i++) {
+ if (mfb->ovlys[i])
+ mcde_dss_destroy_overlay(mfb->ovlys[i]);
+ }
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ if (dev->fictive == false)
+ unregister_early_suspend(&mfb->early_suspend);
+#endif
+ fb_dealloc_cmap(&dev->fbi->cmap);
+
+ unregister_framebuffer(dev->fbi);
+ free_fb_mem(dev->fbi);
+ framebuffer_release(dev->fbi);
+ dev->fbi = NULL;
+}
+
+/* Overlay fbs' platform device */
+static int mcde_fb_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int mcde_fb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver mcde_fb_driver = {
+ .probe = mcde_fb_probe,
+ .remove = mcde_fb_remove,
+ .driver = {
+ .name = "mcde_fb",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* MCDE fb init */
+
+int __init mcde_fb_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mcde_fb_driver);
+ if (ret)
+ goto fb_driver_failed;
+ ret = platform_device_register(&mcde_fb_device);
+ if (ret)
+ goto fb_device_failed;
+
+ goto out;
+fb_device_failed:
+ platform_driver_unregister(&mcde_fb_driver);
+fb_driver_failed:
+out:
+ return ret;
+}
+
+void mcde_fb_exit(void)
+{
+ platform_device_unregister(&mcde_fb_device);
+ platform_driver_unregister(&mcde_fb_driver);
+}
diff --git a/drivers/video/mcde/mcde_hw.c b/drivers/video/mcde/mcde_hw.c
new file mode 100644
index 00000000000..92cdb1ef7a9
--- /dev/null
+++ b/drivers/video/mcde/mcde_hw.c
@@ -0,0 +1,3834 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson MCDE base driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <video/mcde.h>
+#include "dsilink_regs.h"
+#include "mcde_regs.h"
+#include "mcde_debugfs.h"
+
+
+/* MCDE channel states
+ *
+ * Allowed state transitions:
+ * IDLE <-> SUSPEND
+ * IDLE <-> DSI_READ
+ * IDLE <-> DSI_WRITE
+ * IDLE -> SETUP -> (WAIT_TE ->) RUNNING -> STOPPING -> STOPPED -> IDLE
+ * WAIT_TE -> STOPPED (on missing TE, to allow re-enabling)
+ */
+enum chnl_state {
+ CHNLSTATE_SUSPEND, /* HW in suspended mode, initial state */
+ CHNLSTATE_IDLE, /* Channel acquired, but not running, FLOEN==0 */
+ CHNLSTATE_DSI_READ, /* Executing DSI read */
+ CHNLSTATE_DSI_WRITE, /* Executing DSI write */
+ CHNLSTATE_SETUP, /* Channel register setup to prepare for running */
+ CHNLSTATE_WAIT_TE, /* Waiting for BTA or external TE */
+ CHNLSTATE_RUNNING, /* Update started, FLOEN=1, FLOEN==1 */
+ CHNLSTATE_STOPPING, /* Stopping, FLOEN=0, FLOEN==1, awaiting VCMP */
+ CHNLSTATE_STOPPED, /* Stopped, after VCMP, FLOEN==0|1 */
+};
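+
+/*
+ * A typical one-shot (command mode) update thus runs
+ * IDLE -> SETUP -> WAIT_TE -> RUNNING -> STOPPING -> STOPPED,
+ * with the STOPPED -> IDLE step done manually once the flow is
+ * confirmed disabled (see set_channel_state_sync()).
+ */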
+
+enum dsi_lane_status {
+ DSI_LANE_STATE_START = 0x00,
+ DSI_LANE_STATE_IDLE = 0x01,
+ DSI_LANE_STATE_WRITE = 0x02,
+ DSI_LANE_STATE_ULPM = 0x03,
+};
+
+static int set_channel_state_atomic(struct mcde_chnl_state *chnl,
+ enum chnl_state state);
+static int set_channel_state_sync(struct mcde_chnl_state *chnl,
+ enum chnl_state state);
+static void stop_channel(struct mcde_chnl_state *chnl);
+static int _mcde_chnl_enable(struct mcde_chnl_state *chnl);
+static int _mcde_chnl_apply(struct mcde_chnl_state *chnl);
+static void disable_flow(struct mcde_chnl_state *chnl);
+static void enable_flow(struct mcde_chnl_state *chnl);
+static void do_softwaretrig(struct mcde_chnl_state *chnl);
+static void dsi_te_poll_req(struct mcde_chnl_state *chnl);
+static void dsi_te_poll_set_timer(struct mcde_chnl_state *chnl,
+ unsigned int timeout);
+static void dsi_te_timer_function(unsigned long value);
+static int wait_for_vcmp(struct mcde_chnl_state *chnl);
+static int probe_hw(struct platform_device *pdev);
+static void wait_for_flow_disabled(struct mcde_chnl_state *chnl);
+
+#define OVLY_TIMEOUT 100
+#define CHNL_TIMEOUT 100
+#define FLOW_STOP_TIMEOUT 20
+#define SCREEN_PPL_HIGH 1280
+#define SCREEN_PPL_CEA2 720
+#define SCREEN_LPF_CEA2 480
+#define DSI_DELAY0_CEA2_ADD 10
+
+#define MCDE_SLEEP_WATCHDOG 500
+#define DSI_TE_NO_ANSWER_TIMEOUT_INIT 2500
+#define DSI_TE_NO_ANSWER_TIMEOUT 250
+#define DSI_WAIT_FOR_ULPM_STATE_MS 1
+#define DSI_ULPM_STATE_NBR_OF_RETRIES 10
+#define DSI_READ_TIMEOUT 200
+#define DSI_WRITE_CMD_TIMEOUT 1000
+#define DSI_READ_DELAY 5
+#define DSI_READ_NBR_OF_RETRIES 2
+#define MCDE_FLOWEN_MAX_TRIAL 60
+
+#define MCDE_VERSION_4_1_3 0x04010300
+#define MCDE_VERSION_4_0_4 0x04000400
+#define MCDE_VERSION_3_0_8 0x03000800
+#define MCDE_VERSION_3_0_5 0x03000500
+#define MCDE_VERSION_1_0_4 0x01000400
+
+#define CLK_MCDE "mcde"
+#define CLK_DPI "lcd"
+
+static u8 *mcdeio;
+static u8 **dsiio;
+static struct platform_device *mcde_dev;
+static u8 num_dsilinks;
+static u8 num_channels;
+static u8 num_overlays;
+static int mcde_irq;
+static u32 input_fifo_size;
+static u32 output_fifo_ab_size;
+static u32 output_fifo_c0c1_size;
+
+static struct regulator *regulator_vana;
+static struct regulator *regulator_mcde_epod;
+static struct regulator *regulator_esram_epod;
+static struct clk *clock_mcde;
+
+/* TODO remove when all platforms support dsilp and dsihs clocks */
+static struct clk *clock_dsi;
+static struct clk *clock_dsi_lp;
+
+static u8 mcde_is_enabled;
+static struct delayed_work hw_timeout_work;
+static u8 dsi_pll_is_enabled;
+static u8 dsi_ifc_is_supported;
+static u8 dsi_use_clk_framework;
+static u32 mcde_clk_rate; /* In Hz */
+
+static struct mutex mcde_hw_lock;
+static inline void mcde_lock(const char *func, int line)
+{
+ mutex_lock(&mcde_hw_lock);
+ dev_vdbg(&mcde_dev->dev, "Enter MCDE: %s:%d\n", func, line);
+}
+
+static inline void mcde_unlock(const char *func, int line)
+{
+ dev_vdbg(&mcde_dev->dev, "Exit MCDE: %s:%d\n", func, line);
+ mutex_unlock(&mcde_hw_lock);
+}
+
+static inline bool mcde_trylock(const char *func, int line)
+{
+ bool locked = mutex_trylock(&mcde_hw_lock) == 1;
+ if (locked)
+ dev_vdbg(&mcde_dev->dev, "Enter MCDE: %s:%d\n", func, line);
+ return locked;
+}
+
+static u8 mcde_dynamic_power_management = true;
+
+static inline u32 dsi_rreg(int i, u32 reg)
+{
+ return readl(dsiio[i] + reg);
+}
+static inline void dsi_wreg(int i, u32 reg, u32 val)
+{
+ writel(val, dsiio[i] + reg);
+}
+
+#define dsi_rfld(__i, __reg, __fld) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ ((dsi_rreg(__i, __reg) & mask) >> shift); \
+})
+
+#define dsi_wfld(__i, __reg, __fld, __val) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ const u32 oldval = dsi_rreg(__i, __reg); \
+ const u32 newval = ((__val) << shift); \
+ dsi_wreg(__i, __reg, (oldval & ~mask) | (newval & mask)); \
+})
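+
+/*
+ * Example (illustrative): dsi_wfld(0, DSI_MCTL_MAIN_EN, DAT1_ULPM_REQ, 1)
+ * is a read-modify-write of a single register field, equivalent to:
+ *
+ * u32 v = dsi_rreg(0, DSI_MCTL_MAIN_EN);
+ * v &= ~DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ_MASK;
+ * v |= (1 << DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ_SHIFT) &
+ * DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ_MASK;
+ * dsi_wreg(0, DSI_MCTL_MAIN_EN, v);
+ */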
+
+static inline u32 mcde_rreg(u32 reg)
+{
+ return readl(mcdeio + reg);
+}
+static inline void mcde_wreg(u32 reg, u32 val)
+{
+ writel(val, mcdeio + reg);
+}
+
+
+#define mcde_rfld(__reg, __fld) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ ((mcde_rreg(__reg) & mask) >> shift); \
+})
+
+#define mcde_wfld(__reg, __fld, __val) \
+({ \
+ const u32 mask = __reg##_##__fld##_MASK; \
+ const u32 shift = __reg##_##__fld##_SHIFT; \
+ const u32 oldval = mcde_rreg(__reg); \
+ const u32 newval = ((__val) << shift); \
+ mcde_wreg(__reg, (oldval & ~mask) | (newval & mask)); \
+})
+
+struct ovly_regs {
+ bool enabled;
+ bool dirty;
+ bool dirty_buf;
+
+ u8 ch_id;
+ u32 baseaddress0;
+ u32 baseaddress1;
+ u8 bits_per_pixel;
+ u8 bpp;
+ bool bgr;
+ bool bebo;
+ bool opq;
+ u8 col_conv;
+ u8 alpha_source;
+ u8 alpha_value;
+ u8 pixoff;
+ u16 ppl;
+ u16 lpf;
+ u16 cropx;
+ u16 cropy;
+ u16 xpos;
+ u16 ypos;
+ u8 z;
+};
+
+struct mcde_ovly_state {
+ bool inuse;
+ u8 idx; /* MCDE overlay index */
+ struct mcde_chnl_state *chnl; /* Owner channel */
+ bool dirty;
+ bool dirty_buf;
+
+ /* Staged settings */
+ u32 paddr;
+ u16 stride;
+ enum mcde_ovly_pix_fmt pix_fmt;
+
+ u16 src_x;
+ u16 src_y;
+ u16 dst_x;
+ u16 dst_y;
+ u16 dst_z;
+ u16 w;
+ u16 h;
+
+ u8 alpha_source;
+ u8 alpha_value;
+
+ /* Applied settings */
+ struct ovly_regs regs;
+};
+
+static struct mcde_ovly_state *overlays;
+
+struct chnl_regs {
+ bool dirty;
+
+ bool floen;
+ u16 x;
+ u16 y;
+ u16 ppl;
+ u16 lpf;
+ u8 bpp;
+ bool internal_clk; /* CLKTYPE field */
+ u16 pcd;
+ u8 clksel;
+ u8 cdwin;
+ u16 (*map_r)(u8);
+ u16 (*map_g)(u8);
+ u16 (*map_b)(u8);
+ bool palette_enable;
+ bool bcd;
+ bool roten;
+ u8 rotdir;
+ u32 rotbuf1;
+ u32 rotbuf2;
+ u32 rotbufsize;
+
+ /* Blending */
+ u8 blend_ctrl;
+ bool blend_en;
+ u8 alpha_blend;
+
+ /* DSI */
+ u8 dsipacking;
+};
+
+struct col_regs {
+ bool dirty;
+
+ u16 y_red;
+ u16 y_green;
+ u16 y_blue;
+ u16 cb_red;
+ u16 cb_green;
+ u16 cb_blue;
+ u16 cr_red;
+ u16 cr_green;
+ u16 cr_blue;
+ u16 off_y;
+ u16 off_cb;
+ u16 off_cr;
+};
+
+struct tv_regs {
+ bool dirty;
+
+ u16 dho; /* TV mode: left border width; destination horizontal offset */
+ /* LCD MODE: horizontal back porch */
+ u16 alw; /* TV mode: right border width */
+ /* LCD mode: horizontal front porch */
+ u16 hsw; /* horizontal synch width */
+ u16 dvo; /* TV mode: top border width; destination vertical offset */
+ /* LCD MODE: vertical back porch */
+ u16 bsl; /* TV mode: bottom border width; blanking start line */
+ /* LCD MODE: vertical front porch */
+ /* field 1 */
+ u16 bel1; /* TV mode: field total vertical blanking lines */
+ /* LCD mode: vertical sync width */
+ u16 fsl1; /* field vbp */
+ /* field 2 */
+ u16 bel2;
+ u16 fsl2;
+ u8 tv_mode;
+ bool sel_mode_tv;
+ bool inv_clk;
+ bool interlaced_en;
+ u32 lcdtim1;
+};
+
+struct mcde_chnl_state {
+ bool enabled;
+ bool reserved;
+ enum mcde_chnl id;
+ enum mcde_fifo fifo;
+ struct mcde_port port;
+ struct mcde_ovly_state *ovly0;
+ struct mcde_ovly_state *ovly1;
+ enum chnl_state state;
+ wait_queue_head_t state_waitq;
+ wait_queue_head_t vcmp_waitq;
+ atomic_t vcmp_cnt;
+ struct timer_list dsi_te_timer;
+ struct clk *clk_dsi_lp;
+ struct clk *clk_dsi_hs;
+ struct clk *clk_dpi;
+
+ enum mcde_display_power_mode power_mode;
+
+ /* Staged settings */
+ u16 (*map_r)(u8);
+ u16 (*map_g)(u8);
+ u16 (*map_b)(u8);
+ bool palette_enable;
+ struct mcde_video_mode vmode;
+ enum mcde_display_rotation rotation;
+ u32 rotbuf1;
+ u32 rotbuf2;
+ u32 rotbufsize;
+
+ struct mcde_col_transform rgb_2_ycbcr;
+ struct mcde_col_transform ycbcr_2_rgb;
+ struct mcde_col_transform *transform;
+
+ /* Blending */
+ u8 blend_ctrl;
+ bool blend_en;
+ u8 alpha_blend;
+
+ /* Applied settings */
+ struct chnl_regs regs;
+ struct col_regs col_regs;
+ struct tv_regs tv_regs;
+
+ /* an interlaced digital TV signal generates a VCMP per field */
+ bool vcmp_per_field;
+ bool even_vcmp;
+
+ bool formatter_updated;
+ bool esram_is_enabled;
+};
+
+static struct mcde_chnl_state *channels;
+/*
+ * Wait for CSM_RUNNING, all data sent for display
+ */
+static inline void wait_while_dsi_running(int lnk)
+{
+ u8 counter = DSI_READ_TIMEOUT;
+ while (dsi_rfld(lnk, DSI_CMD_MODE_STS, CSM_RUNNING) && --counter) {
+ dev_vdbg(&mcde_dev->dev,
+ "%s: DSI link %u read running state retry %u times\n"
+ , __func__, lnk, (DSI_READ_TIMEOUT - counter));
+ udelay(DSI_READ_DELAY);
+ }
+ WARN_ON(!counter);
+ if (!counter)
+ dev_warn(&mcde_dev->dev,
+ "%s: DSI link %u read timeout!\n", __func__, lnk);
+}
+
+static void enable_clocks_and_power(struct platform_device *pdev)
+{
+ struct mcde_platform_data *pdata = pdev->dev.platform_data;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ /* VANA should be enabled before a DSS hard reset */
+ if (regulator_vana)
+ WARN_ON_ONCE(regulator_enable(regulator_vana));
+
+ WARN_ON_ONCE(regulator_enable(regulator_mcde_epod));
+
+ if (!dsi_use_clk_framework)
+ pdata->platform_set_clocks();
+
+ WARN_ON_ONCE(clk_enable(clock_mcde));
+}
+
+static void disable_clocks_and_power(struct platform_device *pdev)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ clk_disable(clock_mcde);
+
+ WARN_ON_ONCE(regulator_disable(regulator_mcde_epod));
+
+ if (regulator_vana)
+ WARN_ON_ONCE(regulator_disable(regulator_vana));
+}
+
+static void update_mcde_registers(void)
+{
+ struct mcde_platform_data *pdata = mcde_dev->dev.platform_data;
+
+ /* Setup output muxing */
+ mcde_wreg(MCDE_CONF0,
+ MCDE_CONF0_IFIFOCTRLWTRMRKLVL(7) |
+ MCDE_CONF0_OUTMUX0(pdata->outmux[0]) |
+ MCDE_CONF0_OUTMUX1(pdata->outmux[1]) |
+ MCDE_CONF0_OUTMUX2(pdata->outmux[2]) |
+ MCDE_CONF0_OUTMUX3(pdata->outmux[3]) |
+ MCDE_CONF0_OUTMUX4(pdata->outmux[4]) |
+ pdata->syncmux);
+
+ mcde_wfld(MCDE_RISPP, VCMPARIS, 1);
+ mcde_wfld(MCDE_RISPP, VCMPBRIS, 1);
+ mcde_wfld(MCDE_RISPP, VCMPC0RIS, 1);
+ mcde_wfld(MCDE_RISPP, VCMPC1RIS, 1);
+
+ /* Enable channel VCMP interrupts */
+ mcde_wreg(MCDE_IMSCPP,
+ MCDE_IMSCPP_VCMPAIM(true) |
+ MCDE_IMSCPP_VCMPBIM(true) |
+ MCDE_IMSCPP_VCMPC0IM(true) |
+ MCDE_IMSCPP_VCMPC1IM(true));
+
+ mcde_wreg(MCDE_IMSCCHNL, MCDE_IMSCCHNL_CHNLAIM(0xf));
+ mcde_wreg(MCDE_IMSCERR, 0xFFFF01FF);
+
+ /* Setup sync pulse length
+ * Setting VSPMAX=0 disables the filter and VSYNC
+ * is generated after VSPMIN mcde cycles
+ */
+ mcde_wreg(MCDE_VSCRC0,
+ MCDE_VSCRC0_VSPMIN(0) |
+ MCDE_VSCRC0_VSPMAX(0));
+ mcde_wreg(MCDE_VSCRC1,
+ MCDE_VSCRC1_VSPMIN(1) |
+ MCDE_VSCRC1_VSPMAX(0xff));
+}
+
+static void dsi_link_handle_reset(u8 link, bool release)
+{
+ u32 value;
+
+ value = prcmu_read(DB8500_PRCM_DSI_SW_RESET);
+ if (release) {
+ switch (link) {
+ case 0:
+ value |= DB8500_PRCM_DSI_SW_RESET_DSI0_SW_RESETN;
+ break;
+ case 1:
+ value |= DB8500_PRCM_DSI_SW_RESET_DSI1_SW_RESETN;
+ break;
+ case 2:
+ value |= DB8500_PRCM_DSI_SW_RESET_DSI2_SW_RESETN;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (link) {
+ case 0:
+ value &= ~DB8500_PRCM_DSI_SW_RESET_DSI0_SW_RESETN;
+ break;
+ case 1:
+ value &= ~DB8500_PRCM_DSI_SW_RESET_DSI1_SW_RESETN;
+ break;
+ case 2:
+ value &= ~DB8500_PRCM_DSI_SW_RESET_DSI2_SW_RESETN;
+ break;
+ default:
+ break;
+ }
+ }
+ prcmu_write(DB8500_PRCM_DSI_SW_RESET, value);
+}
+
+static void dsi_link_switch_byte_clk(u8 link, bool to_system_clock)
+{
+ u32 value;
+
+ value = prcmu_read(DB8500_PRCM_DSI_GLITCHFREE_EN);
+ if (to_system_clock) {
+ switch (link) {
+ case 0:
+ value |= DB8500_PRCM_DSI_GLITCHFREE_EN_DSI0_BYTE_CLK;
+ break;
+ case 1:
+ value |= DB8500_PRCM_DSI_GLITCHFREE_EN_DSI1_BYTE_CLK;
+ break;
+ case 2:
+ value |= DB8500_PRCM_DSI_GLITCHFREE_EN_DSI2_BYTE_CLK;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (link) {
+ case 0:
+ value &= ~DB8500_PRCM_DSI_GLITCHFREE_EN_DSI0_BYTE_CLK;
+ break;
+ case 1:
+ value &= ~DB8500_PRCM_DSI_GLITCHFREE_EN_DSI1_BYTE_CLK;
+ break;
+ case 2:
+ value &= ~DB8500_PRCM_DSI_GLITCHFREE_EN_DSI2_BYTE_CLK;
+ break;
+ default:
+ break;
+ }
+
+ }
+ prcmu_write(DB8500_PRCM_DSI_GLITCHFREE_EN, value);
+ dsi_wfld(link, DSI_MCTL_PLL_CTL, PLL_OUT_SEL, to_system_clock);
+}
+
+static void dsi_link_handle_ulpm(struct mcde_port *port, bool enter_ulpm)
+{
+ u8 link = port->link;
+ u8 num_data_lanes = port->phy.dsi.num_data_lanes;
+ u8 nbr_of_retries = 0;
+ u8 lane_state;
+
+ /*
+ * The D-PHY protocol specifies the time to leave ULP mode in ms;
+ * exiting ULPM takes at least 1 ms. The ULPOUT time value is
+ * given as the number of system clock ticks divided by 1000,
+ * where the system clock for the DSI link is the MCDE clock.
+ */
+ dsi_wreg(link, DSI_MCTL_ULPOUT_TIME,
+ DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME(0x1FF) |
+ DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME(0x1FF));
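+ /*
+ * Illustrative example: assuming a 160 MHz MCDE clock, 0x1FF
+ * corresponds to 0x1FF * 1000 = 511000 ticks, i.e. roughly
+ * 3.2 ms, comfortably above the 1 ms minimum.
+ */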
+
+ if (enter_ulpm) {
+ lane_state = DSI_LANE_STATE_ULPM;
+ dsi_link_switch_byte_clk(link, true);
+ }
+
+ dsi_wfld(link, DSI_MCTL_MAIN_EN, DAT1_ULPM_REQ, enter_ulpm);
+ dsi_wfld(link, DSI_MCTL_MAIN_EN, DAT2_ULPM_REQ,
+ enter_ulpm && num_data_lanes == 2);
+ dsi_wfld(link, DSI_MCTL_MAIN_EN, CLKLANE_ULPM_REQ, enter_ulpm);
+
+ if (!enter_ulpm) {
+ lane_state = DSI_LANE_STATE_IDLE;
+ dsi_link_switch_byte_clk(link, false);
+ }
+
+ /* Wait for data lanes to enter ULPM */
+ while (dsi_rfld(link, DSI_MCTL_LANE_STS, DATLANE1_STATE)
+ != lane_state ||
+ (dsi_rfld(link, DSI_MCTL_LANE_STS, DATLANE2_STATE)
+ != lane_state &&
+ num_data_lanes > 1)) {
+ mdelay(DSI_WAIT_FOR_ULPM_STATE_MS);
+ if (nbr_of_retries++ == DSI_ULPM_STATE_NBR_OF_RETRIES) {
+ dev_dbg(&mcde_dev->dev,
+ "Could not enter correct state=%d (link=%d)!\n",
+ lane_state, link);
+ break;
+ }
+ }
+
+ nbr_of_retries = 0;
+ /* Wait for clock lane to enter ULPM */
+ while (dsi_rfld(link, DSI_MCTL_LANE_STS, CLKLANE_STATE)
+ != lane_state) {
+ mdelay(DSI_WAIT_FOR_ULPM_STATE_MS);
+ if (nbr_of_retries++ == DSI_ULPM_STATE_NBR_OF_RETRIES) {
+ dev_dbg(&mcde_dev->dev,
+ "Could not enter correct state=%d (link=%d)!\n",
+ lane_state, link);
+ break;
+ }
+ }
+}
+
+static int dsi_link_enable(struct mcde_chnl_state *chnl)
+{
+ int ret = 0;
+ u8 link = chnl->port.link;
+
+ if (dsi_use_clk_framework) {
+ WARN_ON_ONCE(clk_enable(chnl->clk_dsi_lp));
+ WARN_ON_ONCE(clk_enable(chnl->clk_dsi_hs));
+ dsi_link_handle_reset(link, true);
+ } else {
+ WARN_ON_ONCE(clk_enable(clock_dsi));
+ WARN_ON_ONCE(clk_enable(clock_dsi_lp));
+
+ if (!dsi_pll_is_enabled) {
+ struct mcde_platform_data *pdata =
+ mcde_dev->dev.platform_data;
+ ret = pdata->platform_enable_dsipll();
+ if (ret < 0) {
+ dev_warn(&mcde_dev->dev, "%s: "
+ "enable_dsipll failed ret = %d\n",
+ __func__, ret);
+ goto enable_dsipll_err;
+ }
+ dev_dbg(&mcde_dev->dev, "%s enable dsipll\n",
+ __func__);
+ }
+ dsi_pll_is_enabled++;
+ }
+
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, LINK_EN, true);
+
+ dev_dbg(&mcde_dev->dev, "DSI%d LINK_EN\n", link);
+
+ return 0;
+
+enable_dsipll_err:
+ clk_disable(clock_dsi_lp);
+ clk_disable(clock_dsi);
+ return ret;
+}
+
+static void dsi_link_disable(struct mcde_chnl_state *chnl, bool suspend)
+{
+ wait_while_dsi_running(chnl->port.link);
+ dsi_link_handle_ulpm(&chnl->port, true);
+ if (dsi_use_clk_framework) {
+ clk_disable(chnl->clk_dsi_lp);
+ clk_disable(chnl->clk_dsi_hs);
+ } else {
+ if (dsi_pll_is_enabled && (--dsi_pll_is_enabled == 0)) {
+ struct mcde_platform_data *pdata =
+ mcde_dev->dev.platform_data;
+ dev_dbg(&mcde_dev->dev, "%s disable dsipll\n",
+ __func__);
+ pdata->platform_disable_dsipll();
+ }
+ clk_disable(clock_dsi);
+ clk_disable(clock_dsi_lp);
+ }
+}
+
+static void disable_mcde_hw(bool force_disable, bool suspend)
+{
+ int i;
+ bool mcde_up = false;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!mcde_is_enabled)
+ return;
+
+ for (i = 0; i < num_channels; i++) {
+ struct mcde_chnl_state *chnl = &channels[i];
+ if (force_disable || (chnl->enabled &&
+ chnl->state != CHNLSTATE_RUNNING)) {
+ stop_channel(chnl);
+ set_channel_state_sync(chnl, CHNLSTATE_SUSPEND);
+
+ if (chnl->formatter_updated) {
+ if (chnl->port.type == MCDE_PORTTYPE_DSI)
+ dsi_link_disable(chnl, suspend);
+ else if (chnl->port.type == MCDE_PORTTYPE_DPI)
+ clk_disable(chnl->clk_dpi);
+ chnl->formatter_updated = false;
+ }
+ if (chnl->esram_is_enabled) {
+ WARN_ON_ONCE(regulator_disable(
+ regulator_esram_epod));
+ chnl->esram_is_enabled = false;
+ }
+ } else if (chnl->enabled && chnl->state == CHNLSTATE_RUNNING) {
+ mcde_up = true;
+ }
+ }
+
+ if (mcde_up)
+ return;
+
+ free_irq(mcde_irq, &mcde_dev->dev);
+
+ disable_clocks_and_power(mcde_dev);
+
+ mcde_is_enabled = false;
+}
+
+static void dpi_video_mode_apply(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ chnl->tv_regs.interlaced_en = chnl->vmode.interlaced;
+
+ chnl->tv_regs.sel_mode_tv = chnl->port.phy.dpi.tv_mode;
+ if (chnl->tv_regs.sel_mode_tv) {
+ /* TV mode */
+ u32 bel;
+ /* -4 since hsw is excluding SAV/EAV, 2 bytes each */
+ chnl->tv_regs.hsw = chnl->vmode.hbp + chnl->vmode.hfp - 4;
+ /* vbp_field2 = vbp_field1 + 1 */
+ chnl->tv_regs.fsl1 = chnl->vmode.vbp / 2;
+ chnl->tv_regs.fsl2 = chnl->vmode.vbp - chnl->tv_regs.fsl1;
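+ /* e.g. (illustrative): vbp = 23 gives fsl1 = 11 and fsl2 = 12 */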
+ /* +1 since vbp_field2 = vbp_field1 + 1 */
+ bel = chnl->vmode.vbp + chnl->vmode.vfp;
+ /* in TV mode: bel2 = bel1 + 1 */
+ chnl->tv_regs.bel1 = bel / 2;
+ chnl->tv_regs.bel2 = bel - chnl->tv_regs.bel1;
+ if (chnl->port.phy.dpi.bus_width == 4)
+ chnl->tv_regs.tv_mode = MCDE_TVCRA_TVMODE_SDTV_656P_BE;
+ else
+ chnl->tv_regs.tv_mode = MCDE_TVCRA_TVMODE_SDTV_656P;
+ chnl->tv_regs.inv_clk = true;
+ } else {
+ /* LCD mode */
+ u32 polarity;
+ chnl->tv_regs.hsw = chnl->vmode.hsw;
+ chnl->tv_regs.dho = chnl->vmode.hbp;
+ chnl->tv_regs.alw = chnl->vmode.hfp;
+ chnl->tv_regs.bel1 = chnl->vmode.vsw;
+ chnl->tv_regs.bel2 = chnl->tv_regs.bel1;
+ chnl->tv_regs.dvo = chnl->vmode.vbp;
+ chnl->tv_regs.bsl = chnl->vmode.vfp;
+ chnl->tv_regs.fsl1 = 0;
+ chnl->tv_regs.fsl2 = 0;
+ polarity = chnl->port.phy.dpi.polarity;
+ chnl->tv_regs.lcdtim1 = MCDE_LCDTIM1A_IHS(
+ (polarity & DPI_ACT_LOW_HSYNC) != 0);
+ chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IVS(
+ (polarity & DPI_ACT_LOW_VSYNC) != 0);
+ chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IOE(
+ (polarity & DPI_ACT_LOW_DATA_ENABLE) != 0);
+ chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IPC(
+ (polarity & DPI_ACT_ON_FALLING_EDGE) != 0);
+ }
+ chnl->tv_regs.dirty = true;
+}
+
+static void update_dpi_registers(enum mcde_chnl chnl_id, struct tv_regs *regs)
+{
+ u8 idx = chnl_id;
+
+ dev_dbg(&mcde_dev->dev, "%s\n", __func__);
+ mcde_wreg(MCDE_TVCRA + idx * MCDE_TVCRA_GROUPOFFSET,
+ MCDE_TVCRA_SEL_MOD(regs->sel_mode_tv) |
+ MCDE_TVCRA_INTEREN(regs->interlaced_en) |
+ MCDE_TVCRA_IFIELD(0) |
+ MCDE_TVCRA_TVMODE(regs->tv_mode) |
+ MCDE_TVCRA_SDTVMODE(MCDE_TVCRA_SDTVMODE_Y0CBY1CR) |
+ MCDE_TVCRA_CKINV(regs->inv_clk) |
+ MCDE_TVCRA_AVRGEN(0));
+ mcde_wreg(MCDE_TVBLUA + idx * MCDE_TVBLUA_GROUPOFFSET,
+ MCDE_TVBLUA_TVBLU(MCDE_CONFIG_TVOUT_BACKGROUND_LUMINANCE) |
+ MCDE_TVBLUA_TVBCB(MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CB)|
+ MCDE_TVBLUA_TVBCR(MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CR));
+
+ /* Vertical timing registers */
+ mcde_wreg(MCDE_TVDVOA + idx * MCDE_TVDVOA_GROUPOFFSET,
+ MCDE_TVDVOA_DVO1(regs->dvo) |
+ MCDE_TVDVOA_DVO2(regs->dvo));
+ mcde_wreg(MCDE_TVBL1A + idx * MCDE_TVBL1A_GROUPOFFSET,
+ MCDE_TVBL1A_BEL1(regs->bel1) |
+ MCDE_TVBL1A_BSL1(regs->bsl));
+ mcde_wreg(MCDE_TVBL2A + idx * MCDE_TVBL1A_GROUPOFFSET,
+ MCDE_TVBL2A_BEL2(regs->bel2) |
+ MCDE_TVBL2A_BSL2(regs->bsl));
+ mcde_wreg(MCDE_TVISLA + idx * MCDE_TVISLA_GROUPOFFSET,
+ MCDE_TVISLA_FSL1(regs->fsl1) |
+ MCDE_TVISLA_FSL2(regs->fsl2));
+
+ /* Horizontal timing registers */
+ mcde_wreg(MCDE_TVLBALWA + idx * MCDE_TVLBALWA_GROUPOFFSET,
+ MCDE_TVLBALWA_LBW(regs->hsw) |
+ MCDE_TVLBALWA_ALW(regs->alw));
+ mcde_wreg(MCDE_TVTIM1A + idx * MCDE_TVTIM1A_GROUPOFFSET,
+ MCDE_TVTIM1A_DHO(regs->dho));
+ if (!regs->sel_mode_tv)
+ mcde_wreg(MCDE_LCDTIM1A + idx * MCDE_LCDTIM1A_GROUPOFFSET,
+ regs->lcdtim1);
+ regs->dirty = false;
+}
+
+static void update_col_registers(enum mcde_chnl chnl_id, struct col_regs *regs)
+{
+ u8 idx = chnl_id;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ mcde_wreg(MCDE_RGBCONV1A + idx * MCDE_RGBCONV1A_GROUPOFFSET,
+ MCDE_RGBCONV1A_YR_RED(regs->y_red) |
+ MCDE_RGBCONV1A_YR_GREEN(regs->y_green));
+ mcde_wreg(MCDE_RGBCONV2A + idx * MCDE_RGBCONV2A_GROUPOFFSET,
+ MCDE_RGBCONV2A_YR_BLUE(regs->y_blue) |
+ MCDE_RGBCONV2A_CR_RED(regs->cr_red));
+ mcde_wreg(MCDE_RGBCONV3A + idx * MCDE_RGBCONV3A_GROUPOFFSET,
+ MCDE_RGBCONV3A_CR_GREEN(regs->cr_green) |
+ MCDE_RGBCONV3A_CR_BLUE(regs->cr_blue));
+ mcde_wreg(MCDE_RGBCONV4A + idx * MCDE_RGBCONV4A_GROUPOFFSET,
+ MCDE_RGBCONV4A_CB_RED(regs->cb_red) |
+ MCDE_RGBCONV4A_CB_GREEN(regs->cb_green));
+ mcde_wreg(MCDE_RGBCONV5A + idx * MCDE_RGBCONV5A_GROUPOFFSET,
+ MCDE_RGBCONV5A_CB_BLUE(regs->cb_blue) |
+ MCDE_RGBCONV5A_OFF_RED(regs->off_cr));
+ mcde_wreg(MCDE_RGBCONV6A + idx * MCDE_RGBCONV6A_GROUPOFFSET,
+ MCDE_RGBCONV6A_OFF_GREEN(regs->off_y) |
+ MCDE_RGBCONV6A_OFF_BLUE(regs->off_cb));
+ regs->dirty = false;
+}
+
+/* MCDE internal helpers */
+static u8 portfmt2dsipacking(enum mcde_port_pix_fmt pix_fmt)
+{
+ switch (pix_fmt) {
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ return MCDE_DSIVID0CONF0_PACKING_RGB565;
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ return MCDE_DSIVID0CONF0_PACKING_RGB666;
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ default:
+ return MCDE_DSIVID0CONF0_PACKING_RGB888;
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ return MCDE_DSIVID0CONF0_PACKING_HDTV;
+ }
+}
+
+static u8 portfmt2bpp(enum mcde_port_pix_fmt pix_fmt)
+{
+ /* TODO: Check DPI spec *//* REVIEW: Remove or check */
+ switch (pix_fmt) {
+ case MCDE_PORTPIXFMT_DPI_16BPP_C1:
+ case MCDE_PORTPIXFMT_DPI_16BPP_C2:
+ case MCDE_PORTPIXFMT_DPI_16BPP_C3:
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ case MCDE_PORTPIXFMT_DSI_YCBCR422:
+ return 16;
+ case MCDE_PORTPIXFMT_DPI_18BPP_C1:
+ case MCDE_PORTPIXFMT_DPI_18BPP_C2:
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ return 18;
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ case MCDE_PORTPIXFMT_DPI_24BPP:
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ return 24;
+ default:
+ return 1;
+ }
+}
+
+static u8 bpp2outbpp(u8 bpp)
+{
+ switch (bpp) {
+ case 16:
+ return MCDE_CRA1_OUTBPP_16BPP;
+ case 18:
+ return MCDE_CRA1_OUTBPP_18BPP;
+ case 24:
+ return MCDE_CRA1_OUTBPP_24BPP;
+ default:
+ return 0;
+ }
+}
+
+static u8 portfmt2cdwin(enum mcde_port_pix_fmt pix_fmt)
+{
+ switch (pix_fmt) {
+ case MCDE_PORTPIXFMT_DPI_16BPP_C1:
+ return MCDE_CRA1_CDWIN_16BPP_C1;
+ case MCDE_PORTPIXFMT_DPI_16BPP_C2:
+ return MCDE_CRA1_CDWIN_16BPP_C2;
+ case MCDE_PORTPIXFMT_DPI_16BPP_C3:
+ return MCDE_CRA1_CDWIN_16BPP_C3;
+ case MCDE_PORTPIXFMT_DPI_18BPP_C1:
+ return MCDE_CRA1_CDWIN_18BPP_C1;
+ case MCDE_PORTPIXFMT_DPI_18BPP_C2:
+ return MCDE_CRA1_CDWIN_18BPP_C2;
+ case MCDE_PORTPIXFMT_DPI_24BPP:
+ return MCDE_CRA1_CDWIN_24BPP;
+ default:
+ /* only DPI formats are relevant */
+ return 0;
+ }
+}
+
+static u32 get_output_fifo_size(enum mcde_fifo fifo)
+{
+ u32 ret = 1; /* Avoid div by zero */
+
+ switch (fifo) {
+ case MCDE_FIFO_A:
+ case MCDE_FIFO_B:
+ ret = output_fifo_ab_size;
+ break;
+ case MCDE_FIFO_C0:
+ case MCDE_FIFO_C1:
+ ret = output_fifo_c0c1_size;
+ break;
+ default:
+ dev_warn(&mcde_dev->dev, "Unsupported fifo\n");
+ break;
+ }
+ return ret;
+}
+
+static inline u8 get_dsi_formatter_id(const struct mcde_port *port)
+{
+ if (dsi_ifc_is_supported)
+ return 2 * port->link + port->ifc;
+ else
+ return port->link;
+}
+
+static struct mcde_chnl_state *find_channel_by_dsilink(int link)
+{
+ struct mcde_chnl_state *chnl = &channels[0];
+ for (; chnl < &channels[num_channels]; chnl++)
+ if (chnl->enabled && chnl->port.link == link &&
+ chnl->port.type == MCDE_PORTTYPE_DSI)
+ return chnl;
+ return NULL;
+}
+
+static inline void mcde_handle_vcmp(struct mcde_chnl_state *chnl)
+{
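+ /*
+ * Interlaced output generates one VCMP per field; count a frame
+ * (and wake waiters) only on every second VCMP.
+ */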
+ if (!chnl->vcmp_per_field || chnl->even_vcmp) {
+ atomic_inc(&chnl->vcmp_cnt);
+ if (chnl->state == CHNLSTATE_STOPPING)
+ set_channel_state_atomic(chnl, CHNLSTATE_STOPPED);
+ wake_up_all(&chnl->vcmp_waitq);
+ }
+ chnl->even_vcmp = !chnl->even_vcmp;
+}
+
+static void handle_dsi_irq(struct mcde_chnl_state *chnl, int i)
+{
+ u32 irq_status = dsi_rfld(i, DSI_DIRECT_CMD_STS_FLAG, TE_RECEIVED_FLAG);
+ if (irq_status) {
+ dsi_wreg(i, DSI_DIRECT_CMD_STS_CLR,
+ DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(true));
+ dev_vdbg(&mcde_dev->dev, "BTA TE DSI%d\n", i);
+ if (chnl->port.frame_trig == MCDE_TRIG_SW)
+ do_softwaretrig(chnl);
+ }
+
+ irq_status = dsi_rfld(i, DSI_CMD_MODE_STS_FLAG, ERR_NO_TE_FLAG);
+ if (irq_status) {
+ dsi_wreg(i, DSI_CMD_MODE_STS_CLR,
+ DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(true));
+ dev_warn(&mcde_dev->dev, "NO_TE DSI%d\n", i);
+ set_channel_state_atomic(chnl, CHNLSTATE_STOPPED);
+ }
+
+ irq_status = dsi_rfld(i, DSI_DIRECT_CMD_STS, TRIGGER_RECEIVED);
+ if (irq_status) {
+ /* DSI TE polling answer received */
+ dsi_wreg(i, DSI_DIRECT_CMD_STS_CLR,
+ DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR(true));
+
+ /* Reset TE watchdog timer */
+ if (chnl->port.sync_src == MCDE_SYNCSRC_TE_POLLING)
+ dsi_te_poll_set_timer(chnl, DSI_TE_NO_ANSWER_TIMEOUT);
+ }
+}
+
+static irqreturn_t mcde_irq_handler(int irq, void *dev)
+{
+ int i;
+ u32 irq_status;
+
+ irq_status = mcde_rreg(MCDE_MISCHNL);
+ if (irq_status) {
+ dev_err(&mcde_dev->dev, "chnl error=%.8x\n", irq_status);
+ mcde_wreg(MCDE_RISCHNL, irq_status);
+ }
+ irq_status = mcde_rreg(MCDE_MISERR);
+ if (irq_status) {
+ dev_err(&mcde_dev->dev, "error=%.8x\n", irq_status);
+ mcde_wreg(MCDE_RISERR, irq_status);
+ }
+
+ /* Handle channel irqs */
+ irq_status = mcde_rreg(MCDE_RISPP);
+ if (irq_status & MCDE_RISPP_VCMPARIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_A]);
+ if (irq_status & MCDE_RISPP_VCMPBRIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_B]);
+ if (irq_status & MCDE_RISPP_VCMPC0RIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_C0]);
+ if (irq_status & MCDE_RISPP_VCMPC1RIS_MASK)
+ mcde_handle_vcmp(&channels[MCDE_CHNL_C1]);
+ mcde_wreg(MCDE_RISPP, irq_status);
+
+ for (i = 0; i < num_dsilinks; i++) {
+ struct mcde_chnl_state *chnl_from_dsi;
+
+ chnl_from_dsi = find_channel_by_dsilink(i);
+
+ if (chnl_from_dsi == NULL)
+ continue;
+
+ handle_dsi_irq(chnl_from_dsi, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Atomic (non-sleeping) state transitions:
+ * SETUP -> WAIT_TE | RUNNING, WAIT_TE -> RUNNING | STOPPED,
+ * RUNNING -> STOPPING -> STOPPED, and
+ * DSI_READ/DSI_WRITE/SUSPEND -> IDLE
+ */
+static int set_channel_state_atomic(struct mcde_chnl_state *chnl,
+ enum chnl_state state)
+{
+ enum chnl_state chnl_state = chnl->state;
+
+ dev_dbg(&mcde_dev->dev, "Channel state change"
+ " (chnl=%d, old=%d, new=%d)\n", chnl->id, chnl_state, state);
+
+ if ((chnl_state == CHNLSTATE_SETUP && state == CHNLSTATE_WAIT_TE) ||
+ (chnl_state == CHNLSTATE_SETUP && state == CHNLSTATE_RUNNING) ||
+ (chnl_state == CHNLSTATE_WAIT_TE && state == CHNLSTATE_RUNNING) ||
+ (chnl_state == CHNLSTATE_RUNNING && state == CHNLSTATE_STOPPING)) {
+ /* Set wait TE, running, or stopping state */
+ chnl->state = state;
+ return 0;
+ } else if ((chnl_state == CHNLSTATE_STOPPING &&
+ state == CHNLSTATE_STOPPED) ||
+ (chnl_state == CHNLSTATE_WAIT_TE &&
+ state == CHNLSTATE_STOPPED)) {
+ /* Set stopped state */
+ chnl->state = state;
+ wake_up_all(&chnl->state_waitq);
+ return 0;
+ } else if (state == CHNLSTATE_IDLE) {
+ /* Set idle state */
+ WARN_ON_ONCE(chnl_state != CHNLSTATE_DSI_READ &&
+ chnl_state != CHNLSTATE_DSI_WRITE &&
+ chnl_state != CHNLSTATE_SUSPEND);
+ chnl->state = state;
+ wake_up_all(&chnl->state_waitq);
+ return 0;
+ } else {
+ /* Invalid atomic state transition */
+ dev_warn(&mcde_dev->dev, "Channel state change error (chnl=%d,"
+ " old=%d, new=%d)\n", chnl->id, chnl_state, state);
+ WARN_ON_ONCE(true);
+ return -EINVAL;
+ }
+}
+
+/* LOCKING: mcde_hw_lock */
+static int set_channel_state_sync(struct mcde_chnl_state *chnl,
+ enum chnl_state state)
+{
+ int ret = 0;
+ enum chnl_state chnl_state = chnl->state;
+
+ dev_dbg(&mcde_dev->dev, "Channel state change"
+ " (chnl=%d, old=%d, new=%d)\n", chnl->id, chnl->state, state);
+
+ /* No change */
+ if (chnl_state == state)
+ return 0;
+
+ /* Wait for IDLE before changing state */
+ if (chnl_state != CHNLSTATE_IDLE) {
+ ret = wait_event_timeout(chnl->state_waitq,
+ /* STOPPED -> IDLE is manual, so wait for both */
+ chnl->state == CHNLSTATE_STOPPED ||
+ chnl->state == CHNLSTATE_IDLE,
+ msecs_to_jiffies(CHNL_TIMEOUT));
+ if (WARN_ON_ONCE(!ret))
+ dev_warn(&mcde_dev->dev, "Wait for channel timeout "
+ "(chnl=%d, curr=%d, new=%d)\n",
+ chnl->id, chnl->state, state);
+ chnl_state = chnl->state;
+ }
+
+ /* Do manual transition from STOPPED to IDLE */
+ if (chnl_state == CHNLSTATE_STOPPED)
+ wait_for_flow_disabled(chnl);
+
+ /* State is IDLE, do transition to new state */
+ chnl->state = state;
+
+ return ret;
+}
+
+static int wait_for_vcmp(struct mcde_chnl_state *chnl)
+{
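+ /* Wait for the next VCMP counted after this call, or time out. */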
+ u64 vcmp = atomic_read(&chnl->vcmp_cnt) + 1;
+ int ret = wait_event_timeout(chnl->vcmp_waitq,
+ atomic_read(&chnl->vcmp_cnt) >= vcmp,
+ msecs_to_jiffies(CHNL_TIMEOUT));
+ return ret;
+}
+
+static void get_vid_operating_mode(const struct mcde_port *port,
+ bool *burst_mode, bool *sync_is_pulse, bool *tvg_enable)
+{
+ switch (port->phy.dsi.vid_mode) {
+ case NON_BURST_MODE_WITH_SYNC_EVENT:
+ *burst_mode = false;
+ *sync_is_pulse = false;
+ *tvg_enable = false;
+ break;
+ case NON_BURST_MODE_WITH_SYNC_EVENT_TVG_ENABLED:
+ *burst_mode = false;
+ *sync_is_pulse = false;
+ *tvg_enable = true;
+ break;
+ case BURST_MODE_WITH_SYNC_EVENT:
+ *burst_mode = true;
+ *sync_is_pulse = false;
+ *tvg_enable = false;
+ break;
+ case BURST_MODE_WITH_SYNC_PULSE:
+ *burst_mode = true;
+ *sync_is_pulse = true;
+ *tvg_enable = false;
+ break;
+ default:
+ dev_err(&mcde_dev->dev, "Unsupported video mode\n");
+ break;
+ }
+}
+
+static void update_vid_static_registers(const struct mcde_port *port)
+{
+ u8 link = port->link;
+ bool burst_mode, sync_is_pulse, tvg_enable;
+
+ get_vid_operating_mode(port, &burst_mode, &sync_is_pulse, &tvg_enable);
+
+ /* burst mode or non-burst mode */
+ dsi_wfld(link, DSI_VID_MAIN_CTL, BURST_MODE, burst_mode);
+
+ /* sync is pulse or event */
+ dsi_wfld(link, DSI_VID_MAIN_CTL, SYNC_PULSE_ACTIVE, sync_is_pulse);
+ dsi_wfld(link, DSI_VID_MAIN_CTL, SYNC_PULSE_HORIZONTAL, sync_is_pulse);
+
+ /* disable video stream when using TVG */
+ if (tvg_enable) {
+ dsi_wfld(link, DSI_MCTL_MAIN_EN, IF1_EN, false);
+ dsi_wfld(link, DSI_MCTL_MAIN_EN, IF2_EN, false);
+ }
+
+ /*
+ * behavior during blanking time
+ * 00: NULL packet 1x:LP 01:blanking-packet
+ */
+ dsi_wfld(link, DSI_VID_MAIN_CTL, REG_BLKLINE_MODE, 1);
+
+ /*
+ * behavior during eol
+ * 00: NULL packet 1x:LP 01:blanking-packet
+ */
+ dsi_wfld(link, DSI_VID_MAIN_CTL, REG_BLKEOL_MODE, 2);
+
+ /* time to perform LP->HS on D-PHY */
+ dsi_wfld(link, DSI_VID_DPHY_TIME, REG_WAKEUP_TIME,
+ port->phy.dsi.vid_wakeup_time);
+
+ /*
+ * video stream starts on VSYNC packet
+ * and stops at the end of a frame
+ */
+ dsi_wfld(link, DSI_VID_MAIN_CTL, VID_ID, port->phy.dsi.virt_id);
+ dsi_wfld(link, DSI_VID_MAIN_CTL, START_MODE, 0);
+ dsi_wfld(link, DSI_VID_MAIN_CTL, STOP_MODE, 0);
+
+ /* 1: if1 in video mode, 0: if1 in command mode */
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, IF1_MODE, 1);
+
+ /* 1: enables the link, 0: disables the link */
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, VID_EN, 1);
+}
+
+static int update_channel_static_registers(struct mcde_chnl_state *chnl)
+{
+ const struct mcde_port *port = &chnl->port;
+
+ switch (chnl->fifo) {
+ case MCDE_FIFO_A:
+ mcde_wreg(MCDE_CHNL0MUXING + chnl->id *
+ MCDE_CHNL0MUXING_GROUPOFFSET,
+ MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_A));
+ if (port->type == MCDE_PORTTYPE_DPI) {
+ mcde_wfld(MCDE_CTRLA, FORMTYPE,
+ MCDE_CTRLA_FORMTYPE_DPITV);
+ mcde_wfld(MCDE_CTRLA, FORMID, port->link);
+ } else if (port->type == MCDE_PORTTYPE_DSI) {
+ mcde_wfld(MCDE_CTRLA, FORMTYPE,
+ MCDE_CTRLA_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLA, FORMID,
+ get_dsi_formatter_id(port));
+ }
+ break;
+ case MCDE_FIFO_B:
+ mcde_wreg(MCDE_CHNL0MUXING + chnl->id *
+ MCDE_CHNL0MUXING_GROUPOFFSET,
+ MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_B));
+ if (port->type == MCDE_PORTTYPE_DPI) {
+ mcde_wfld(MCDE_CTRLB, FORMTYPE,
+ MCDE_CTRLB_FORMTYPE_DPITV);
+ mcde_wfld(MCDE_CTRLB, FORMID, port->link);
+ } else if (port->type == MCDE_PORTTYPE_DSI) {
+ mcde_wfld(MCDE_CTRLB, FORMTYPE,
+ MCDE_CTRLB_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLB, FORMID,
+ get_dsi_formatter_id(port));
+ }
+
+ break;
+ case MCDE_FIFO_C0:
+ mcde_wreg(MCDE_CHNL0MUXING + chnl->id *
+ MCDE_CHNL0MUXING_GROUPOFFSET,
+ MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_C0));
+ if (port->type == MCDE_PORTTYPE_DPI)
+ return -EINVAL;
+ mcde_wfld(MCDE_CTRLC0, FORMTYPE,
+ MCDE_CTRLC0_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLC0, FORMID, get_dsi_formatter_id(port));
+ break;
+ case MCDE_FIFO_C1:
+ mcde_wreg(MCDE_CHNL0MUXING + chnl->id *
+ MCDE_CHNL0MUXING_GROUPOFFSET,
+ MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_C1));
+ if (port->type == MCDE_PORTTYPE_DPI)
+ return -EINVAL;
+ mcde_wfld(MCDE_CTRLC1, FORMTYPE,
+ MCDE_CTRLC1_FORMTYPE_DSI);
+ mcde_wfld(MCDE_CTRLC1, FORMID, get_dsi_formatter_id(port));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Formatter */
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ int i = 0;
+ u8 idx;
+ u8 lnk = port->link;
+
+ idx = get_dsi_formatter_id(port);
+
+ if (dsi_link_enable(chnl))
+ goto failed_to_enable_link;
+
+ if (port->sync_src == MCDE_SYNCSRC_TE_POLLING) {
+ /* Enable DSI TE polling */
+ dsi_te_poll_req(chnl);
+
+ /* Set timer to detect non TE answer */
+ dsi_te_poll_set_timer(chnl,
+ DSI_TE_NO_ANSWER_TIMEOUT_INIT);
+ } else {
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, true);
+ }
+
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, HOST_EOT_GEN,
+ port->phy.dsi.host_eot_gen);
+
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, DLX_REMAP_EN,
+ port->phy.dsi.data_lanes_swap);
+
+ dsi_wreg(lnk, DSI_MCTL_DPHY_STATIC,
+ DSI_MCTL_DPHY_STATIC_UI_X4(port->phy.dsi.ui));
+ dsi_wreg(lnk, DSI_DPHY_LANES_TRIM,
+ DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_ENUM(0_90));
+ dsi_wreg(lnk, DSI_MCTL_DPHY_TIMEOUT,
+ DSI_MCTL_DPHY_TIMEOUT_CLK_DIV(0xf) |
+ DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL(0x3fff) |
+ DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL(0x3fff));
+ dsi_wreg(lnk, DSI_MCTL_MAIN_PHY_CTL,
+ DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME(0xf) |
+ DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN(true) |
+ DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN(true) |
+ DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN(true) |
+ DSI_MCTL_MAIN_PHY_CTL_LANE2_EN(
+ port->phy.dsi.num_data_lanes >= 2) |
+ DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS(
+ port->phy.dsi.clk_cont));
+ /* TODO: make enum */
+ dsi_wfld(lnk, DSI_CMD_MODE_CTL, ARB_MODE, false);
+ /* TODO: make enum */
+ dsi_wfld(lnk, DSI_CMD_MODE_CTL, ARB_PRI, port->ifc == 1);
+ dsi_wreg(lnk, DSI_MCTL_MAIN_EN,
+ DSI_MCTL_MAIN_EN_PLL_START(true) |
+ DSI_MCTL_MAIN_EN_CKLANE_EN(true) |
+ DSI_MCTL_MAIN_EN_DAT1_EN(true) |
+ DSI_MCTL_MAIN_EN_DAT2_EN(port->phy.dsi.num_data_lanes
+ == 2) |
+ DSI_MCTL_MAIN_EN_IF1_EN(port->ifc == 0) |
+ DSI_MCTL_MAIN_EN_IF2_EN(port->ifc == 1));
+ while (dsi_rfld(lnk, DSI_MCTL_MAIN_STS, CLKLANE_READY) == 0 ||
+ dsi_rfld(lnk, DSI_MCTL_MAIN_STS, DAT1_READY) == 0 ||
+ (dsi_rfld(lnk, DSI_MCTL_MAIN_STS, DAT2_READY) == 0 &&
+ port->phy.dsi.num_data_lanes > 1)) {
+ mdelay(1);
+ if (i++ == 10) {
+ dev_warn(&mcde_dev->dev,
+ "DSI lane not ready (link=%d)!\n", lnk);
+ goto dsi_link_error;
+ }
+ }
+
+ dsi_link_handle_ulpm(&chnl->port, false);
+ mcde_wreg(MCDE_DSIVID0CONF0 +
+ idx * MCDE_DSIVID0CONF0_GROUPOFFSET,
+ MCDE_DSIVID0CONF0_BLANKING(0) |
+ MCDE_DSIVID0CONF0_VID_MODE(
+ port->mode == MCDE_PORTMODE_VID) |
+ MCDE_DSIVID0CONF0_CMD8(true) |
+ MCDE_DSIVID0CONF0_BIT_SWAP(false) |
+ MCDE_DSIVID0CONF0_BYTE_SWAP(false) |
+ MCDE_DSIVID0CONF0_DCSVID_NOTGEN(true));
+
+ if (port->mode == MCDE_PORTMODE_VID) {
+ update_vid_static_registers(port);
+ } else {
+ if (port->ifc == 0)
+ dsi_wfld(port->link, DSI_CMD_MODE_CTL, IF1_ID,
+ port->phy.dsi.virt_id);
+ else if (port->ifc == 1)
+ dsi_wfld(port->link, DSI_CMD_MODE_CTL, IF2_ID,
+ port->phy.dsi.virt_id);
+ }
+ }
+
+ if (port->type == MCDE_PORTTYPE_DPI) {
+ if (port->phy.dpi.lcd_freq != clk_round_rate(chnl->clk_dpi,
+ port->phy.dpi.lcd_freq))
+ dev_warn(&mcde_dev->dev, "Could not set lcd freq"
+ " to %d\n", port->phy.dpi.lcd_freq);
+ WARN_ON_ONCE(clk_set_rate(chnl->clk_dpi,
+ port->phy.dpi.lcd_freq));
+ WARN_ON_ONCE(clk_enable(chnl->clk_dpi));
+ }
+
+ mcde_wfld(MCDE_CR, MCDEEN, true);
+ chnl->formatter_updated = true;
+
+ dev_vdbg(&mcde_dev->dev, "Static registers setup, chnl=%d\n", chnl->id);
+
+ return 0;
+dsi_link_error:
+ dsi_link_disable(chnl, true);
+failed_to_enable_link:
+ return -EINVAL;
+}
+
+void mcde_chnl_col_convert_apply(struct mcde_chnl_state *chnl,
+ struct mcde_col_transform *transform)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl->transform != transform) {
+
+ chnl->col_regs.y_red = transform->matrix[0][0];
+ chnl->col_regs.y_green = transform->matrix[0][1];
+ chnl->col_regs.y_blue = transform->matrix[0][2];
+ chnl->col_regs.cb_red = transform->matrix[1][0];
+ chnl->col_regs.cb_green = transform->matrix[1][1];
+ chnl->col_regs.cb_blue = transform->matrix[1][2];
+ chnl->col_regs.cr_red = transform->matrix[2][0];
+ chnl->col_regs.cr_green = transform->matrix[2][1];
+ chnl->col_regs.cr_blue = transform->matrix[2][2];
+ chnl->col_regs.off_y = transform->offset[0];
+ chnl->col_regs.off_cb = transform->offset[1];
+ chnl->col_regs.off_cr = transform->offset[2];
+ chnl->col_regs.dirty = true;
+
+ chnl->transform = transform;
+ }
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+static void chnl_ovly_pixel_format_apply(struct mcde_chnl_state *chnl,
+ struct mcde_ovly_state *ovly)
+{
+ struct mcde_port *port = &chnl->port;
+ struct ovly_regs *regs = &ovly->regs;
+
+ /* Note: YUV -> YUV: blending YUV overlays will not make sense. */
+ static struct mcde_col_transform crycb_2_ycbcr = {
+ /* Note that in MCDE YUV 422 pixels come as VYU pixels */
+ .matrix = {
+ {0x0000, 0x0100, 0x0000},
+ {0x0000, 0x0000, 0x0100},
+ {0x0100, 0x0000, 0x0000},
+ },
+ .offset = {0, 0, 0},
+ };
+
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ if (port->pixel_format != MCDE_PORTPIXFMT_DSI_YCBCR422) {
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422) {
+ /* standard case: DSI: RGB -> RGB */
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED;
+ } else {
+ /* DSI: YUV -> RGB */
+ /* TODO change matrix */
+ regs->col_conv =
+ MCDE_OVL0CR_COLCCTRL_ENABLED_SAT;
+ mcde_chnl_col_convert_apply(chnl,
+ &chnl->ycbcr_2_rgb);
+ }
+ } else {
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422)
+ /* DSI: RGB -> YUV */
+ mcde_chnl_col_convert_apply(chnl,
+ &chnl->rgb_2_ycbcr);
+ else
+ /* DSI: YUV -> YUV */
+ mcde_chnl_col_convert_apply(chnl,
+ &crycb_2_ycbcr);
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT;
+ }
+ } else if (port->type == MCDE_PORTTYPE_DPI && port->phy.dpi.tv_mode) {
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT;
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422)
+ mcde_chnl_col_convert_apply(chnl, &chnl->rgb_2_ycbcr);
+ else
+ mcde_chnl_col_convert_apply(chnl, &crycb_2_ycbcr);
+ } else if (port->type == MCDE_PORTTYPE_DPI) {
+ /* Note: YUV is not a supported port pixel format for DPI */
+ if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422) {
+ /* standard case: DPI: RGB -> RGB */
+ regs->col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED;
+ } else {
+ /* DPI: YUV -> RGB */
+ regs->col_conv =
+ MCDE_OVL0CR_COLCCTRL_ENABLED_SAT;
+ mcde_chnl_col_convert_apply(chnl,
+ &chnl->ycbcr_2_rgb);
+ }
+ }
+}
+
+/* REVIEW: Make update_* an mcde_rectangle? */
+static void update_overlay_registers(u8 idx, struct ovly_regs *regs,
+ struct mcde_port *port, enum mcde_fifo fifo,
+ u16 update_x, u16 update_y, u16 update_w,
+ u16 update_h, s16 stride, bool interlaced,
+ enum mcde_display_rotation rotation)
+{
+ /* TODO: fix clipping for small overlay */
+ u32 lmrgn = (regs->cropx + update_x) * regs->bits_per_pixel;
+ u32 tmrgn = (regs->cropy + update_y) * stride;
+ u32 ppl = regs->ppl - update_x;
+ u32 lpf = regs->lpf - update_y;
+ s32 ljinc = stride;
+ u32 pixelfetchwtrmrklevel;
+ u8 nr_of_bufs = 1;
+ u32 sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL;
+
+ if (rotation == MCDE_DISPLAY_ROT_180_CCW) {
+ ljinc = -ljinc;
+ tmrgn += stride * (regs->lpf - 1) / 8;
+ }
+
+ /*
+ * Preferably most of this is done in some apply function instead of for
+ * every update. However lpf has a dependency on update_y.
+ */
+ if (interlaced && port->type == MCDE_PORTTYPE_DSI) {
+ nr_of_bufs = 2;
+ lpf = lpf / 2;
+ ljinc *= 2;
+ }
+
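+ /*
+ * Pixel fetcher watermark: lines of SCREEN_PPL_HIGH or more on
+ * FIFO A/B get twice the input FIFO size, all other cases half
+ * of it.
+ */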
+ if ((fifo == MCDE_FIFO_A || fifo == MCDE_FIFO_B) &&
+ regs->ppl >= SCREEN_PPL_HIGH)
+ pixelfetchwtrmrklevel = input_fifo_size * 2;
+ else
+ pixelfetchwtrmrklevel = input_fifo_size / 2;
+
+ if (port->update_auto_trig && port->type == MCDE_PORTTYPE_DSI) {
+ switch (port->sync_src) {
+ case MCDE_SYNCSRC_OFF:
+ sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL;
+ break;
+ case MCDE_SYNCSRC_TE0:
+ case MCDE_SYNCSRC_TE1:
+ case MCDE_SYNCSRC_TE_POLLING:
+ default:
+ sel_mod = MCDE_EXTSRC0CR_SEL_MOD_AUTO_TOGGLE;
+ break;
+ }
+ } else if (port->type == MCDE_PORTTYPE_DPI)
+ sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL;
+
+ mcde_wreg(MCDE_EXTSRC0CONF + idx * MCDE_EXTSRC0CONF_GROUPOFFSET,
+ MCDE_EXTSRC0CONF_BUF_ID(0) |
+ MCDE_EXTSRC0CONF_BUF_NB(nr_of_bufs) |
+ MCDE_EXTSRC0CONF_PRI_OVLID(idx) |
+ MCDE_EXTSRC0CONF_BPP(regs->bpp) |
+ MCDE_EXTSRC0CONF_BGR(regs->bgr) |
+ MCDE_EXTSRC0CONF_BEBO(regs->bebo) |
+ MCDE_EXTSRC0CONF_BEPO(false));
+ mcde_wreg(MCDE_EXTSRC0CR + idx * MCDE_EXTSRC0CR_GROUPOFFSET,
+ MCDE_EXTSRC0CR_SEL_MOD(sel_mod) |
+ MCDE_EXTSRC0CR_MULTIOVL_CTRL_ENUM(PRIMARY) |
+ MCDE_EXTSRC0CR_FS_DIV_DISABLE(false) |
+ MCDE_EXTSRC0CR_FORCE_FS_DIV(false));
+ mcde_wreg(MCDE_OVL0CR + idx * MCDE_OVL0CR_GROUPOFFSET,
+ MCDE_OVL0CR_OVLEN(regs->enabled) |
+ MCDE_OVL0CR_COLCCTRL(regs->col_conv) |
+ MCDE_OVL0CR_CKEYGEN(false) |
+ MCDE_OVL0CR_ALPHAPMEN(false) |
+ MCDE_OVL0CR_OVLF(false) |
+ MCDE_OVL0CR_OVLR(false) |
+ MCDE_OVL0CR_OVLB(false) |
+ MCDE_OVL0CR_FETCH_ROPC(0) |
+ MCDE_OVL0CR_STBPRIO(0) |
+ MCDE_OVL0CR_BURSTSIZE_ENUM(HW_8W) |
+ /* TODO: enum, get from ovly */
+ MCDE_OVL0CR_MAXOUTSTANDING_ENUM(8_REQ) |
+ /* TODO: _HW_8W, calculate? */
+ MCDE_OVL0CR_ROTBURSTSIZE_ENUM(HW_8W));
+ mcde_wreg(MCDE_OVL0CONF + idx * MCDE_OVL0CONF_GROUPOFFSET,
+ MCDE_OVL0CONF_PPL(ppl) |
+ MCDE_OVL0CONF_EXTSRC_ID(idx) |
+ MCDE_OVL0CONF_LPF(lpf));
+ mcde_wreg(MCDE_OVL0CONF2 + idx * MCDE_OVL0CONF2_GROUPOFFSET,
+ MCDE_OVL0CONF2_BP(regs->alpha_source) |
+ MCDE_OVL0CONF2_ALPHAVALUE(regs->alpha_value) |
+ MCDE_OVL0CONF2_OPQ(regs->opq) |
+ MCDE_OVL0CONF2_PIXOFF(lmrgn & 63) |
+ MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL(
+ pixelfetchwtrmrklevel));
+ mcde_wreg(MCDE_OVL0LJINC + idx * MCDE_OVL0LJINC_GROUPOFFSET,
+ ljinc);
+ mcde_wreg(MCDE_OVL0CROP + idx * MCDE_OVL0CROP_GROUPOFFSET,
+ MCDE_OVL0CROP_TMRGN(tmrgn) |
+ MCDE_OVL0CROP_LMRGN(lmrgn >> 6));
+ regs->dirty = false;
+
+ dev_vdbg(&mcde_dev->dev, "Overlay registers setup, idx=%d\n", idx);
+}
+
+static void update_overlay_registers_on_the_fly(u8 idx, struct ovly_regs *regs)
+{
+ mcde_wreg(MCDE_OVL0COMP + idx * MCDE_OVL0COMP_GROUPOFFSET,
+ MCDE_OVL0COMP_XPOS(regs->xpos) |
+ MCDE_OVL0COMP_CH_ID(regs->ch_id) |
+ MCDE_OVL0COMP_YPOS(regs->ypos) |
+ MCDE_OVL0COMP_Z(regs->z));
+
+ mcde_wreg(MCDE_EXTSRC0A0 + idx * MCDE_EXTSRC0A0_GROUPOFFSET,
+ regs->baseaddress0);
+ mcde_wreg(MCDE_EXTSRC0A1 + idx * MCDE_EXTSRC0A1_GROUPOFFSET,
+ regs->baseaddress1);
+ regs->dirty_buf = false;
+}
+
+static void do_softwaretrig(struct mcde_chnl_state *chnl)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ enable_flow(chnl);
+ mcde_wreg(MCDE_CHNL0SYNCHSW +
+ chnl->id * MCDE_CHNL0SYNCHSW_GROUPOFFSET,
+ MCDE_CHNL0SYNCHSW_SW_TRIG(true));
+ disable_flow(chnl);
+
+ local_irq_restore(flags);
+
+ dev_vdbg(&mcde_dev->dev, "Software TRIG on channel %d\n", chnl->id);
+}
+
+static void disable_flow(struct mcde_chnl_state *chnl)
+{
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(chnl->state != CHNLSTATE_RUNNING))
+ return;
+
+ local_irq_save(flags);
+
+ switch (chnl->id) {
+ case MCDE_CHNL_A:
+ mcde_wfld(MCDE_CRA0, FLOEN, false);
+ break;
+ case MCDE_CHNL_B:
+ mcde_wfld(MCDE_CRB0, FLOEN, false);
+ break;
+ case MCDE_CHNL_C0:
+ mcde_wfld(MCDE_CRC, C1EN, false);
+ break;
+ case MCDE_CHNL_C1:
+ mcde_wfld(MCDE_CRC, C2EN, false);
+ break;
+ }
+
+ set_channel_state_atomic(chnl, CHNLSTATE_STOPPING);
+
+ local_irq_restore(flags);
+}
+
+static void stop_channel(struct mcde_chnl_state *chnl)
+{
+ const struct mcde_port *port = &chnl->port;
+ bool dpi_lcd_mode;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl->state != CHNLSTATE_RUNNING)
+ return;
+
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ dsi_wfld(port->link, DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS,
+ false);
+ if (port->sync_src == MCDE_SYNCSRC_TE_POLLING)
+ del_timer(&chnl->dsi_te_timer);
+ }
+
+ disable_flow(chnl);
+	/*
+	 * VCOMP must be triggered manually after the channel is disabled,
+	 * for all channels using video mode except DPI LCD.
+	 */
+ dpi_lcd_mode = (port->type == MCDE_PORTTYPE_DPI &&
+ !chnl->port.phy.dpi.tv_mode);
+
+ if (chnl->port.update_auto_trig && !dpi_lcd_mode)
+ mcde_wreg(MCDE_SISPP, 1 << chnl->id);
+}
+
+static void wait_for_flow_disabled(struct mcde_chnl_state *chnl)
+{
+ int i = 0;
+
+ switch (chnl->id) {
+ case MCDE_CHNL_A:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRA0, FLOEN)) {
+ dev_vdbg(&mcde_dev->dev,
+ "Flow (A) disable after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ case MCDE_CHNL_B:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRB0, FLOEN)) {
+ dev_vdbg(&mcde_dev->dev,
+ "Flow (B) disable after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ case MCDE_CHNL_C0:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRC, C1EN)) {
+ dev_vdbg(&mcde_dev->dev,
+ "Flow (C1) disable after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ case MCDE_CHNL_C1:
+ for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) {
+ if (!mcde_rfld(MCDE_CRC, C2EN)) {
+ dev_vdbg(&mcde_dev->dev,
+ "Flow (C2) disable after >= %d ms\n", i);
+ break;
+ }
+ msleep(1);
+ }
+ break;
+ }
+ if (i == MCDE_FLOWEN_MAX_TRIAL)
+ dev_err(&mcde_dev->dev, "%s: channel %d timeout\n",
+ __func__, chnl->id);
+}
+
+static void enable_flow(struct mcde_chnl_state *chnl)
+{
+ const struct mcde_port *port = &chnl->port;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (port->type == MCDE_PORTTYPE_DSI)
+ dsi_wfld(port->link, DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS,
+ port->phy.dsi.clk_cont);
+
+ /*
+ * When ROTEN is set, the FLOEN bit will also be set but
+ * the flow has to be started anyway.
+ */
+ switch (chnl->id) {
+ case MCDE_CHNL_A:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRA0, FLOEN));
+ mcde_wfld(MCDE_CRA0, ROTEN, chnl->regs.roten);
+ mcde_wfld(MCDE_CRA0, FLOEN, true);
+ break;
+ case MCDE_CHNL_B:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRB0, FLOEN));
+ mcde_wfld(MCDE_CRB0, ROTEN, chnl->regs.roten);
+ mcde_wfld(MCDE_CRB0, FLOEN, true);
+ break;
+ case MCDE_CHNL_C0:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRC, C1EN));
+ mcde_wfld(MCDE_CRC, C1EN, true);
+ break;
+ case MCDE_CHNL_C1:
+ WARN_ON_ONCE(mcde_rfld(MCDE_CRC, C2EN));
+ mcde_wfld(MCDE_CRC, C2EN, true);
+ break;
+ }
+
+ set_channel_state_atomic(chnl, CHNLSTATE_RUNNING);
+}
+
+static void work_sleep_function(struct work_struct *ptr)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ if (mcde_trylock(__func__, __LINE__)) {
+ if (mcde_dynamic_power_management)
+ disable_mcde_hw(false, false);
+ mcde_unlock(__func__, __LINE__);
+ }
+}
+
+/* TODO get from register */
+#define MCDE_CLK_FREQ_MHZ 160
+static u32 get_pkt_div(u32 disp_ppl,
+ struct mcde_port *port,
+ enum mcde_fifo fifo)
+{
+	/*
+	 * Lines can be split into several packets only in DSI CMD mode.
+	 * In DSI VIDEO mode, 1 line = 1 packet.
+	 * DPI is like DSI VIDEO (watermark = 1 line).
+	 * DPI waits for fifo ready only for the first line of the first frame.
+	 * If a line is wider than the fifo size, the watermark can be set
+	 * to either the fifo size or the line size, since the watermark is
+	 * saturated at the fifo size inside MCDE.
+	 */
+ switch (port->type) {
+ case MCDE_PORTTYPE_DSI:
+ if (port->mode == MCDE_PORTMODE_CMD)
+ /* Equivalent of ceil(disp_ppl/fifo_size) */
+ return (disp_ppl - 1) / get_output_fifo_size(fifo) + 1;
+ else
+ return 1;
+ break;
+ case MCDE_PORTTYPE_DPI:
+ return 1;
+ break;
+ default:
+ break;
+ }
+ return 1;
+}
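+/*
+ * Worked example for get_pkt_div() above (hypothetical values): a DSI
+ * CMD mode display with disp_ppl = 864 and an output fifo of 640 gives
+ * (864 - 1) / 640 + 1 = 2, i.e. each line is sent as two packets.
+ */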
+
+static void update_vid_horizontal_blanking(struct mcde_port *port,
+ struct mcde_video_mode *vmode, bool sync_is_pulse, u8 bpp)
+{
+ int hfp, hbp, hsa;
+ u8 link = port->link;
+
+ /*
+ * vmode->hfp, vmode->hbp and vmode->hsw are given in pixels
+ * and must be re-calculated into bytes
+ *
+ * 6 + 2 is HFP header + checksum
+ */
+ hfp = vmode->hfp * bpp - 6 - 2;
+ if (sync_is_pulse) {
+ /*
+ * 6 is HBP header + checksum
+ * 4 is RGB header + checksum
+ */
+ hbp = vmode->hbp * bpp - 4 - 6;
+ /*
+ * 6 is HBP header + checksum
+ * 4 is HSW packet bytes
+ * 4 is RGB header + checksum
+ */
+ hsa = vmode->hsw * bpp - 4 - 4 - 6;
+ } else {
+ /*
+ * 6 is HBP header + checksum
+ * 4 is HSW packet bytes
+ * 4 is RGB header + checksum
+ */
+ hbp = (vmode->hbp + vmode->hsw) * bpp - 4 - 4 - 6;
+		/* HSA is not considered in this mode and is set to 0 */
+ hsa = 0;
+ }
+ if (hfp < 0) {
+ hfp = 0;
+ dev_warn(&mcde_dev->dev,
+ "%s: negative calc for hfp, set to 0\n", __func__);
+ }
+ if (hbp < 0) {
+ hbp = 0;
+ dev_warn(&mcde_dev->dev,
+ "%s: negative calc for hbp, set to 0\n", __func__);
+ }
+ if (hsa < 0) {
+ hsa = 0;
+ dev_warn(&mcde_dev->dev,
+ "%s: negative calc for hsa, set to 0\n", __func__);
+ }
+
+ dsi_wfld(link, DSI_VID_HSIZE1, HFP_LENGTH, hfp);
+ dsi_wfld(link, DSI_VID_HSIZE1, HBP_LENGTH, hbp);
+ dsi_wfld(link, DSI_VID_HSIZE1, HSA_LENGTH, hsa);
+}
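+/*
+ * Worked example for update_vid_horizontal_blanking() above
+ * (hypothetical values, sync_is_pulse = false): with bpp = 3 (24 bpp),
+ * hfp = 10, hbp = 20 and hsw = 4 pixels:
+ *
+ *	hfp = 10 * 3 - 6 - 2           = 22 bytes
+ *	hbp = (20 + 4) * 3 - 4 - 4 - 6 = 58 bytes
+ *	hsa = 0
+ */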
+
+static void update_vid_frame_parameters(struct mcde_port *port,
+ struct mcde_video_mode *vmode, u8 bpp)
+{
+ u8 link = port->link;
+ bool burst_mode, sync_is_pulse, tvg_enable;
+ u32 hs_byte_clk, pck_len, blkline_pck, line_duration;
+ u32 blkeol_pck, blkeol_duration;
+ u8 pixel_mode;
+ u8 rgb_header;
+
+ get_vid_operating_mode(port, &burst_mode, &sync_is_pulse, &tvg_enable);
+
+ dsi_wfld(link, DSI_VID_VSIZE, VFP_LENGTH, vmode->vfp);
+ dsi_wfld(link, DSI_VID_VSIZE, VBP_LENGTH, vmode->vbp);
+ dsi_wfld(link, DSI_VID_VSIZE, VSA_LENGTH, vmode->vsw);
+ update_vid_horizontal_blanking(port, vmode, sync_is_pulse, bpp);
+
+ dsi_wfld(link, DSI_VID_VSIZE, VACT_LENGTH, vmode->yres);
+ dsi_wfld(link, DSI_VID_HSIZE2, RGB_SIZE, vmode->xres * bpp);
+
+ /*
+ * The rgb_header identifies the pixel stream format,
+ * as described in the MIPI DSI Specification:
+ *
+ * 0x0E: Packed pixel stream, 16-bit RGB, 565 format
+ * 0x1E: Packed pixel stream, 18-bit RGB, 666 format
+ * 0x2E: Loosely Packed pixel stream, 18-bit RGB, 666 format
+ * 0x3E: Packed pixel stream, 24-bit RGB, 888 format
+ */
+ switch (port->pixel_format) {
+ case MCDE_PORTPIXFMT_DSI_16BPP:
+ pixel_mode = 0;
+ rgb_header = 0x0E;
+ break;
+ case MCDE_PORTPIXFMT_DSI_18BPP:
+ pixel_mode = 2;
+ rgb_header = 0x2E;
+ break;
+ case MCDE_PORTPIXFMT_DSI_18BPP_PACKED:
+ pixel_mode = 1;
+ rgb_header = 0x1E;
+ break;
+ case MCDE_PORTPIXFMT_DSI_24BPP:
+ pixel_mode = 3;
+ rgb_header = 0x3E;
+ break;
+ default:
+ pixel_mode = 3;
+ rgb_header = 0x3E;
+ dev_warn(&mcde_dev->dev,
+ "%s: invalid pixel format %d\n",
+ __func__, port->pixel_format);
+ break;
+ }
+
+ dsi_wfld(link, DSI_VID_MAIN_CTL, VID_PIXEL_MODE, pixel_mode);
+ dsi_wfld(link, DSI_VID_MAIN_CTL, HEADER, rgb_header);
+
+ if (tvg_enable) {
+		/*
+		 * With these settings, expect to see 64-pixel-wide red
+		 * and green vertical stripes on the screen when
+		 * tvg_enable is true.
+		 */
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, TVG_SEL, 1);
+
+ dsi_wfld(link, DSI_TVG_CTL, TVG_STRIPE_SIZE, 6);
+ dsi_wfld(link, DSI_TVG_CTL, TVG_MODE, 2);
+ dsi_wfld(link, DSI_TVG_CTL, TVG_STOPMODE, 2);
+ dsi_wfld(link, DSI_TVG_CTL, TVG_RUN, 1);
+
+ dsi_wfld(link, DSI_TVG_IMG_SIZE, TVG_NBLINE, vmode->yres);
+ dsi_wfld(link, DSI_TVG_IMG_SIZE, TVG_LINE_SIZE,
+ vmode->xres * bpp);
+
+ dsi_wfld(link, DSI_TVG_COLOR1, COL1_BLUE, 0);
+ dsi_wfld(link, DSI_TVG_COLOR1, COL1_GREEN, 0);
+ dsi_wfld(link, DSI_TVG_COLOR1, COL1_RED, 0xFF);
+
+ dsi_wfld(link, DSI_TVG_COLOR2, COL2_BLUE, 0);
+ dsi_wfld(link, DSI_TVG_COLOR2, COL2_GREEN, 0xFF);
+ dsi_wfld(link, DSI_TVG_COLOR2, COL2_RED, 0);
+ }
+
+	/*
+	 * vmode->pixclock is the time between two pixels (in picoseconds).
+	 *
+	 * hs_byte_clk is the number of bytes transferred per lane and
+	 * second (in MHz).
+	 */
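+	/*
+	 * Worked example (hypothetical values): vmode->pixclock = 25000 ps
+	 * corresponds to a 40 MHz pixel clock, giving
+	 * hs_byte_clk = 1000000 / 25000 / 8 = 5.
+	 */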
+ hs_byte_clk = 1000000 / vmode->pixclock / 8;
+ pck_len = 1000000 * hs_byte_clk / port->refresh_rate /
+ (vmode->vsw + vmode->vbp + vmode->yres + vmode->vfp) *
+ port->phy.dsi.num_data_lanes;
+
+ /*
+ * 6 is header + checksum, header = 4 bytes, checksum = 2 bytes
+ * 4 is short packet for vsync/hsync
+ */
+ if (sync_is_pulse)
+ blkline_pck = pck_len - vmode->hsw - 6;
+ else
+ blkline_pck = pck_len - 4 - 6;
+
+ line_duration = (blkline_pck + 6) / port->phy.dsi.num_data_lanes;
+ blkeol_pck = pck_len -
+ (vmode->hsw + vmode->hbp + vmode->xres + vmode->hfp) * bpp - 6;
+ blkeol_duration = (blkeol_pck + 6) / port->phy.dsi.num_data_lanes;
+
+ if (sync_is_pulse)
+ dsi_wfld(link, DSI_VID_BLKSIZE2, BLKLINE_PULSE_PCK,
+ blkline_pck);
+ else
+ dsi_wfld(link, DSI_VID_BLKSIZE1, BLKLINE_EVENT_PCK,
+ blkline_pck);
+ dsi_wfld(link, DSI_VID_DPHY_TIME, REG_LINE_DURATION, line_duration);
+ if (burst_mode) {
+ dsi_wfld(link, DSI_VID_BLKSIZE1, BLKEOL_PCK, blkeol_pck);
+ dsi_wfld(link, DSI_VID_PCK_TIME, BLKEOL_DURATION,
+ blkeol_duration);
+ dsi_wfld(link, DSI_VID_VCA_SETTING1, MAX_BURST_LIMIT,
+ blkeol_pck - 6);
+ dsi_wfld(link, DSI_VID_VCA_SETTING2, EXACT_BURST_LIMIT,
+ blkeol_pck);
+ }
+ if (sync_is_pulse)
+ dsi_wfld(link, DSI_VID_VCA_SETTING2, MAX_LINE_LIMIT,
+ blkline_pck - 6);
+}
+
+static void set_vsync_method(u8 idx, struct mcde_port *port)
+{
+ u32 out_synch_src = MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER;
+ u32 src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ switch (port->frame_trig) {
+ case MCDE_TRIG_HW:
+ src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+ break;
+ case MCDE_TRIG_SW:
+ src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE;
+ break;
+ default:
+ src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+ break;
+ }
+
+ switch (port->sync_src) {
+ case MCDE_SYNCSRC_OFF:
+ out_synch_src =
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER;
+ break;
+ case MCDE_SYNCSRC_TE0:
+ out_synch_src = MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE0;
+ if (src_synch ==
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE) {
+ dev_dbg(&mcde_dev->dev, "%s: badly configured "
+ "frame sync, TE0 defaulting "
+ "to hw frame trig\n", __func__);
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+ }
+ break;
+ case MCDE_SYNCSRC_TE1:
+ out_synch_src = MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE1;
+ if (src_synch ==
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE) {
+ dev_dbg(&mcde_dev->dev, "%s: badly configured "
+ "frame sync, TE1 defaulting "
+ "to hw frame trig\n", __func__);
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+ }
+ break;
+ case MCDE_SYNCSRC_BTA:
+ out_synch_src =
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER;
+ break;
+ case MCDE_SYNCSRC_TE_POLLING:
+ out_synch_src =
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER;
+ if (src_synch ==
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE) {
+ dev_dbg(&mcde_dev->dev, "%s: badly configured "
+ "frame sync, TE_POLLING defaulting "
+ "to hw frame trig\n", __func__);
+ src_synch =
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+ }
+ break;
+ default:
+ out_synch_src =
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER;
+ src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE;
+ dev_dbg(&mcde_dev->dev, "%s: no sync src selected, "
+ "defaulting to DSI BTA with "
+ "hw frame trig\n", __func__);
+ break;
+ }
+ } else if (port->type == MCDE_PORTTYPE_DPI) {
+ out_synch_src = MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER;
+ src_synch = port->update_auto_trig ?
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE :
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE;
+ }
+
+ mcde_wreg(MCDE_CHNL0SYNCHMOD +
+ idx * MCDE_CHNL0SYNCHMOD_GROUPOFFSET,
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH(src_synch) |
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC(out_synch_src));
+}
+
+void update_channel_registers(enum mcde_chnl chnl_id, struct chnl_regs *regs,
+ struct mcde_port *port, enum mcde_fifo fifo,
+ struct mcde_video_mode *video_mode)
+{
+ u8 idx = chnl_id;
+ u32 fifo_wtrmrk = 0;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ /*
+ * Select appropriate fifo watermark.
+ * Watermark will be saturated at fifo size inside MCDE.
+ */
+ fifo_wtrmrk = video_mode->xres /
+ get_pkt_div(video_mode->xres, port, fifo);
+
+ dev_vdbg(&mcde_dev->dev, "%s fifo_watermark=%d for chnl_id=%d\n",
+ __func__, fifo_wtrmrk, chnl_id);
+
+ switch (chnl_id) {
+ case MCDE_CHNL_A:
+ mcde_wfld(MCDE_CTRLA, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ case MCDE_CHNL_B:
+ mcde_wfld(MCDE_CTRLB, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ case MCDE_CHNL_C0:
+ mcde_wfld(MCDE_CTRLC0, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ case MCDE_CHNL_C1:
+ mcde_wfld(MCDE_CTRLC1, FIFOWTRMRK, fifo_wtrmrk);
+ break;
+ default:
+ break;
+ }
+
+ set_vsync_method(idx, port);
+
+ mcde_wreg(MCDE_CHNL0CONF + idx * MCDE_CHNL0CONF_GROUPOFFSET,
+		MCDE_CHNL0CONF_PPL(regs->ppl - 1) |
+		MCDE_CHNL0CONF_LPF(regs->lpf - 1));
+ mcde_wreg(MCDE_CHNL0STAT + idx * MCDE_CHNL0STAT_GROUPOFFSET,
+ MCDE_CHNL0STAT_CHNLBLBCKGND_EN(false) |
+ MCDE_CHNL0STAT_CHNLRD(true));
+ mcde_wreg(MCDE_CHNL0BCKGNDCOL + idx * MCDE_CHNL0BCKGNDCOL_GROUPOFFSET,
+ MCDE_CHNL0BCKGNDCOL_B(0) |
+ MCDE_CHNL0BCKGNDCOL_G(0) |
+ MCDE_CHNL0BCKGNDCOL_R(0));
+
+ if (chnl_id == MCDE_CHNL_A || chnl_id == MCDE_CHNL_B) {
+ u32 mcde_crx1;
+ u32 mcde_pal0x;
+ u32 mcde_pal1x;
+ if (chnl_id == MCDE_CHNL_A) {
+ mcde_crx1 = MCDE_CRA1;
+ mcde_pal0x = MCDE_PAL0A;
+ mcde_pal1x = MCDE_PAL1A;
+ mcde_wfld(MCDE_CRA0, PALEN, regs->palette_enable);
+ } else {
+ mcde_crx1 = MCDE_CRB1;
+ mcde_pal0x = MCDE_PAL0B;
+ mcde_pal1x = MCDE_PAL1B;
+ mcde_wfld(MCDE_CRB0, PALEN, regs->palette_enable);
+ }
+ mcde_wreg(mcde_crx1,
+ MCDE_CRA1_PCD(regs->pcd) |
+ MCDE_CRA1_CLKSEL(regs->clksel) |
+ MCDE_CRA1_CDWIN(regs->cdwin) |
+ MCDE_CRA1_OUTBPP(bpp2outbpp(regs->bpp)) |
+ MCDE_CRA1_BCD(regs->bcd) |
+ MCDE_CRA1_CLKTYPE(regs->internal_clk));
+ if (regs->palette_enable) {
+ int i;
+ for (i = 0; i < 256; i++) {
+ mcde_wreg(mcde_pal0x,
+ MCDE_PAL0A_GREEN(regs->map_g(i)) |
+ MCDE_PAL0A_BLUE(regs->map_b(i)));
+ mcde_wreg(mcde_pal1x,
+ MCDE_PAL1A_RED(regs->map_r(i)));
+ }
+ }
+ }
+
+ /* Formatter */
+ if (port->type == MCDE_PORTTYPE_DSI) {
+ u8 fidx;
+ u32 temp, packet;
+		/*
+		 * pkt_div is used to avoid underflow in output fifo for
+		 * large packets
+		 */
+ u32 pkt_div = 1;
+ u32 dsi_delay0 = 0;
+ u32 screen_ppl, screen_lpf;
+
+ fidx = get_dsi_formatter_id(port);
+
+ screen_ppl = video_mode->xres;
+ screen_lpf = video_mode->yres;
+
+ pkt_div = get_pkt_div(screen_ppl, port, fifo);
+
+ if (video_mode->interlaced)
+ screen_lpf /= 2;
+
+		/*
+		 * pkt_delay_progressive = pixelclock * htot /
+		 * (1E12 / 160E6) / pkt_div
+		 */
+ dsi_delay0 = (video_mode->pixclock) *
+ (video_mode->xres + video_mode->hbp +
+ video_mode->hfp) /
+ (100000000 / ((mcde_clk_rate / 10000))) / pkt_div;
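+		/*
+		 * Worked example (hypothetical values): pixclock = 25000 ps,
+		 * xres + hbp + hfp = 900 and mcde_clk_rate = 160 MHz give a
+		 * line time of 22.5 us and an MCDE clock period of 6250 ps,
+		 * i.e. dsi_delay0 = 22500000 / 6250 / pkt_div = 3600 MCDE
+		 * clock cycles for pkt_div = 1.
+		 */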
+
+ if ((screen_ppl == SCREEN_PPL_CEA2) &&
+ (screen_lpf == SCREEN_LPF_CEA2))
+ dsi_delay0 += DSI_DELAY0_CEA2_ADD;
+
+ temp = mcde_rreg(MCDE_DSIVID0CONF0 +
+ fidx * MCDE_DSIVID0CONF0_GROUPOFFSET);
+ mcde_wreg(MCDE_DSIVID0CONF0 +
+ fidx * MCDE_DSIVID0CONF0_GROUPOFFSET,
+ (temp & ~MCDE_DSIVID0CONF0_PACKING_MASK) |
+ MCDE_DSIVID0CONF0_PACKING(regs->dsipacking));
+ /* no extra command byte in video mode */
+ if (port->mode == MCDE_PORTMODE_CMD)
+ packet = ((screen_ppl / pkt_div * regs->bpp) >> 3) + 1;
+ else
+ packet = ((screen_ppl / pkt_div * regs->bpp) >> 3);
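+		/*
+		 * Worked example (hypothetical values): screen_ppl = 864,
+		 * pkt_div = 2 and regs->bpp = 24 give
+		 * packet = (864 / 2 * 24) >> 3 = 1296 bytes in video mode,
+		 * plus one command byte in CMD mode.
+		 */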
+ mcde_wreg(MCDE_DSIVID0FRAME +
+ fidx * MCDE_DSIVID0FRAME_GROUPOFFSET,
+ MCDE_DSIVID0FRAME_FRAME(packet * pkt_div * screen_lpf));
+ mcde_wreg(MCDE_DSIVID0PKT + fidx * MCDE_DSIVID0PKT_GROUPOFFSET,
+ MCDE_DSIVID0PKT_PACKET(packet));
+ mcde_wreg(MCDE_DSIVID0SYNC +
+ fidx * MCDE_DSIVID0SYNC_GROUPOFFSET,
+ MCDE_DSIVID0SYNC_SW(0) |
+ MCDE_DSIVID0SYNC_DMA(0));
+ mcde_wreg(MCDE_DSIVID0CMDW +
+ fidx * MCDE_DSIVID0CMDW_GROUPOFFSET,
+ MCDE_DSIVID0CMDW_CMDW_START(DCS_CMD_WRITE_START) |
+ MCDE_DSIVID0CMDW_CMDW_CONTINUE(DCS_CMD_WRITE_CONTINUE));
+ mcde_wreg(MCDE_DSIVID0DELAY0 +
+ fidx * MCDE_DSIVID0DELAY0_GROUPOFFSET,
+ MCDE_DSIVID0DELAY0_INTPKTDEL(dsi_delay0));
+ mcde_wreg(MCDE_DSIVID0DELAY1 +
+ fidx * MCDE_DSIVID0DELAY1_GROUPOFFSET,
+ MCDE_DSIVID0DELAY1_TEREQDEL(0) |
+ MCDE_DSIVID0DELAY1_FRAMESTARTDEL(0));
+
+ if (port->mode == MCDE_PORTMODE_VID)
+ update_vid_frame_parameters(port, video_mode,
+ regs->bpp / 8);
+ } else if (port->type == MCDE_PORTTYPE_DPI &&
+ !port->phy.dpi.tv_mode) {
+ /* DPI LCD Mode */
+ if (chnl_id == MCDE_CHNL_A) {
+ mcde_wreg(MCDE_SYNCHCONFA,
+ MCDE_SYNCHCONFA_HWREQVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFA_HWREQVCNT(
+ video_mode->yres - 1) |
+ MCDE_SYNCHCONFA_SWINTVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFA_SWINTVCNT(
+ video_mode->yres - 1));
+ } else if (chnl_id == MCDE_CHNL_B) {
+ mcde_wreg(MCDE_SYNCHCONFB,
+ MCDE_SYNCHCONFB_HWREQVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFB_HWREQVCNT(
+ video_mode->yres - 1) |
+ MCDE_SYNCHCONFB_SWINTVEVENT_ENUM(
+ ACTIVE_VIDEO) |
+ MCDE_SYNCHCONFB_SWINTVCNT(
+ video_mode->yres - 1));
+ }
+ }
+
+ if (regs->roten) {
+ u32 stripwidth;
+ u32 stripwidth_val;
+
+ /* calc strip width, 32 bits used internally */
+ stripwidth = regs->rotbufsize / (video_mode->yres * 4);
+ if (stripwidth >= 32)
+ stripwidth_val = MCDE_ROTACONF_STRIP_WIDTH_32PIX;
+ else if (stripwidth >= 16)
+ stripwidth_val = MCDE_ROTACONF_STRIP_WIDTH_16PIX;
+ else if (stripwidth >= 8)
+ stripwidth_val = MCDE_ROTACONF_STRIP_WIDTH_8PIX;
+ else if (stripwidth >= 4)
+ stripwidth_val = MCDE_ROTACONF_STRIP_WIDTH_4PIX;
+ else
+ stripwidth_val = MCDE_ROTACONF_STRIP_WIDTH_2PIX;
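+		/*
+		 * Worked example (hypothetical values): a 64 KiB rotation
+		 * buffer and yres = 480 give
+		 * stripwidth = 65536 / (480 * 4) = 34, selecting the
+		 * 32-pixel strip width.
+		 */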
+ dev_vdbg(&mcde_dev->dev, "%s stripwidth=%d\n", __func__,
+ 1 << (stripwidth_val + 1));
+ mcde_wreg(MCDE_ROTADD0A + chnl_id * MCDE_ROTADD0A_GROUPOFFSET,
+ regs->rotbuf1);
+ mcde_wreg(MCDE_ROTADD1A + chnl_id * MCDE_ROTADD1A_GROUPOFFSET,
+ regs->rotbuf2);
+ mcde_wreg(MCDE_ROTACONF + chnl_id * MCDE_ROTACONF_GROUPOFFSET,
+ MCDE_ROTACONF_ROTBURSTSIZE_ENUM(HW_8W) |
+ MCDE_ROTACONF_ROTDIR(regs->rotdir) |
+ MCDE_ROTACONF_STRIP_WIDTH(stripwidth_val) |
+ MCDE_ROTACONF_RD_MAXOUT_ENUM(4_REQ) |
+ MCDE_ROTACONF_WR_MAXOUT_ENUM(8_REQ));
+ }
+
+ /* Blending */
+ if (chnl_id == MCDE_CHNL_A) {
+ mcde_wfld(MCDE_CRA0, BLENDEN, regs->blend_en);
+ mcde_wfld(MCDE_CRA0, BLENDCTRL, regs->blend_ctrl);
+ mcde_wfld(MCDE_CRA0, ALPHABLEND, regs->alpha_blend);
+ } else if (chnl_id == MCDE_CHNL_B) {
+ mcde_wfld(MCDE_CRB0, BLENDEN, regs->blend_en);
+ mcde_wfld(MCDE_CRB0, BLENDCTRL, regs->blend_ctrl);
+ mcde_wfld(MCDE_CRB0, ALPHABLEND, regs->alpha_blend);
+ }
+
+ dev_vdbg(&mcde_dev->dev, "Channel registers setup, chnl=%d\n", chnl_id);
+ regs->dirty = false;
+}
+
+static int enable_mcde_hw(void)
+{
+ int ret;
+ int i;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ cancel_delayed_work(&hw_timeout_work);
+ schedule_delayed_work(&hw_timeout_work,
+ msecs_to_jiffies(MCDE_SLEEP_WATCHDOG));
+
+ for (i = 0; i < num_channels; i++) {
+ struct mcde_chnl_state *chnl = &channels[i];
+ if (chnl->state == CHNLSTATE_SUSPEND) {
+ /* Mark all registers as dirty */
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+ chnl->ovly0->regs.dirty = true;
+ chnl->ovly0->regs.dirty_buf = true;
+ if (chnl->ovly1) {
+ chnl->ovly1->regs.dirty = true;
+ chnl->ovly1->regs.dirty_buf = true;
+ }
+ chnl->regs.dirty = true;
+ chnl->col_regs.dirty = true;
+ chnl->tv_regs.dirty = true;
+ atomic_set(&chnl->vcmp_cnt, 0);
+ }
+ }
+
+ if (mcde_is_enabled) {
+ dev_vdbg(&mcde_dev->dev, "%s - already enabled\n", __func__);
+ return 0;
+ }
+
+ enable_clocks_and_power(mcde_dev);
+
+ ret = request_irq(mcde_irq, mcde_irq_handler, 0, "mcde",
+ &mcde_dev->dev);
+ if (ret) {
+ dev_dbg(&mcde_dev->dev, "Failed to request irq (irq=%d)\n",
+ mcde_irq);
+ cancel_delayed_work(&hw_timeout_work);
+		return ret;
+ }
+
+ update_mcde_registers();
+
+ dev_vdbg(&mcde_dev->dev, "%s - enable done\n", __func__);
+
+ mcde_is_enabled = true;
+ return 0;
+}
+
+/* DSI */
+static int mcde_dsi_direct_cmd_write(struct mcde_chnl_state *chnl,
+ bool dcs, u8 cmd, u8 *data, int len)
+{
+ int i, ret = 0;
+ u32 wrdat[4] = { 0, 0, 0, 0 };
+ u32 settings;
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+ u32 counter = DSI_WRITE_CMD_TIMEOUT;
+
+ if (len > MCDE_MAX_DSI_DIRECT_CMD_WRITE ||
+ chnl->port.type != MCDE_PORTTYPE_DSI)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+
+ _mcde_chnl_enable(chnl);
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EINVAL;
+ }
+ if (!chnl->formatter_updated)
+ (void)update_channel_static_registers(chnl);
+
+ set_channel_state_sync(chnl, CHNLSTATE_DSI_WRITE);
+
+ if (dcs) {
+ wrdat[0] = cmd;
+ for (i = 1; i <= len; i++)
+ wrdat[i>>2] |= ((u32)data[i-1] << ((i & 3) * 8));
+ } else {
+ /* no explicit cmd byte for generic_write, only params */
+ for (i = 0; i < len; i++)
+ wrdat[i>>2] |= ((u32)data[i] << ((i & 3) * 8));
+ }
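+	/*
+	 * Worked example of the packing above (hypothetical values): a DCS
+	 * write with cmd = 0x2a, data = {0x00, 0x01, 0x02, 0x03} and
+	 * len = 4 is laid out little-endian as wrdat[0] = 0x0201002a and
+	 * wrdat[1] = 0x00000003.
+	 */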
+
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(WRITE) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(len > 1) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+		DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(len + 1) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true);
+ if (dcs) {
+ if (len == 0)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ DCS_SHORT_WRITE_0);
+ else if (len == 1)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ DCS_SHORT_WRITE_1);
+ else
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ DCS_LONG_WRITE);
+ } else {
+ if (len == 0)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_SHORT_WRITE_0);
+ else if (len == 1)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_SHORT_WRITE_1);
+ else if (len == 2)
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_SHORT_WRITE_2);
+ else
+ settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ GENERIC_LONG_WRITE);
+ }
+
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, wrdat[0]);
+ if (len > 3)
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT1, wrdat[1]);
+ if (len > 7)
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT2, wrdat[2]);
+ if (len > 11)
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT3, wrdat[3]);
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0);
+ dsi_wreg(link, DSI_CMD_MODE_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+
+ /* loop will normally run zero or one time until WRITE_COMPLETED */
+ while (!dsi_rfld(link, DSI_DIRECT_CMD_STS, WRITE_COMPLETED)
+ && --counter)
+ cpu_relax();
+
+ if (!counter) {
+ dev_err(&mcde_dev->dev,
+ "%s: DSI write cmd 0x%x timeout on DSI link %u!\n",
+ __func__, cmd, link);
+ ret = -ETIME;
+ } else {
+ /* inform if >100 loops before command completion */
+ if (counter < (DSI_WRITE_CMD_TIMEOUT-DSI_WRITE_CMD_TIMEOUT/10))
+ dev_vdbg(&mcde_dev->dev,
+ "%s: %u loops for DSI command %x completion\n",
+ __func__, (DSI_WRITE_CMD_TIMEOUT - counter),
+ cmd);
+
+ dev_vdbg(&mcde_dev->dev, "DSI Write ok %x error %x\n",
+ dsi_rreg(link, DSI_DIRECT_CMD_STS_FLAG),
+ dsi_rreg(link, DSI_CMD_MODE_STS_FLAG));
+ }
+
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+
+ mcde_unlock(__func__, __LINE__);
+
+ return ret;
+}
+
+int mcde_dsi_generic_write(struct mcde_chnl_state *chnl, u8 *para, int len)
+{
+ return mcde_dsi_direct_cmd_write(chnl, false, 0, para, len);
+}
+
+int mcde_dsi_dcs_write(struct mcde_chnl_state *chnl, u8 cmd, u8 *data, int len)
+{
+ return mcde_dsi_direct_cmd_write(chnl, true, cmd, data, len);
+}
+
+int mcde_dsi_dcs_read(struct mcde_chnl_state *chnl,
+ u8 cmd, u32 *data, int *len)
+{
+ int ret = 0;
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+ u32 settings;
+ bool ok = false;
+	bool error = false, ack_with_err = false;
+ u8 nbr_of_retries = DSI_READ_NBR_OF_RETRIES;
+
+ if (*len > MCDE_MAX_DCS_READ || chnl->port.type != MCDE_PORTTYPE_DSI)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+
+ _mcde_chnl_enable(chnl);
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EINVAL;
+ }
+ if (!chnl->formatter_updated)
+ (void)update_channel_static_registers(chnl);
+
+ set_channel_state_sync(chnl, CHNLSTATE_DSI_READ);
+
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true);
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(READ) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(1) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(DCS_READ);
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, cmd);
+
+ do {
+ u8 wait = DSI_READ_TIMEOUT;
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_RD_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+
+ while (wait-- && !(error = dsi_rfld(link, DSI_DIRECT_CMD_STS,
+ READ_COMPLETED_WITH_ERR)) &&
+ !(ok = dsi_rfld(link, DSI_DIRECT_CMD_STS,
+ READ_COMPLETED)))
+ udelay(DSI_READ_DELAY);
+
+ ack_with_err = dsi_rfld(link, DSI_DIRECT_CMD_STS,
+ ACKNOWLEDGE_WITH_ERR_RECEIVED);
+ if (ack_with_err)
+ dev_warn(&mcde_dev->dev,
+ "DCS Acknowledge Error Report %.4X\n",
+ dsi_rfld(link, DSI_DIRECT_CMD_STS, ACK_VAL));
+ } while (--nbr_of_retries && ack_with_err);
+
+ if (ok) {
+ int rdsize;
+ u32 rddat;
+
+ rdsize = dsi_rfld(link, DSI_DIRECT_CMD_RD_PROPERTY, RD_SIZE);
+ rddat = dsi_rreg(link, DSI_DIRECT_CMD_RDDAT);
+ if (rdsize < *len)
+ dev_warn(&mcde_dev->dev, "DCS incomplete read %d<%d"
+ " (%.8X)\n", rdsize, *len, rddat);
+ *len = min(*len, rdsize);
+ memcpy(data, &rddat, *len);
+ } else {
+ dev_err(&mcde_dev->dev, "DCS read failed, err=%d, sts=%X\n",
+ error, dsi_rreg(link, DSI_DIRECT_CMD_STS));
+ ret = -EIO;
+ }
+
+ dsi_wreg(link, DSI_CMD_MODE_STS_CLR, ~0);
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0);
+
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+
+ mcde_unlock(__func__, __LINE__);
+
+ return ret;
+}
+
+/*
+ * Set Maximum Return Packet Size is a command that specifies the
+ * maximum size of the payload transmitted from the peripheral back to
+ * the host processor.
+ *
+ * During the power-on or reset sequence, the Maximum Return Packet Size
+ * is set to a default value of one. In order to be able to use
+ * mcde_dsi_dcs_read for reading more than 1 byte at a time, this
+ * parameter should be set by the host processor to the desired value
+ * in the initialization routine before commencing normal operation.
+ */
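+/*
+ * Hypothetical usage sketch (the DCS command value and local variables
+ * are examples, not part of this driver): a display driver would
+ * typically do
+ *
+ *	u32 id = 0;
+ *	int len = 3;
+ *
+ *	mcde_dsi_set_max_pkt_size(chnl);
+ *	mcde_dsi_dcs_read(chnl, 0x04, &id, &len);
+ *
+ * during initialization, so that the 3-byte read is not truncated to
+ * the default return packet size of one.
+ */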
+int mcde_dsi_set_max_pkt_size(struct mcde_chnl_state *chnl)
+{
+ u32 settings;
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+
+ if (chnl->port.type != MCDE_PORTTYPE_DSI)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EIO;
+ }
+
+ set_channel_state_sync(chnl, CHNLSTATE_DSI_WRITE);
+
+ /*
+ * Set Maximum Return Packet Size is a two-byte command packet
+ * that specifies the maximum size of the payload as u16 value.
+ * The order of bytes is: MaxSize LSB, MaxSize MSB
+ */
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(WRITE) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(2) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(
+ SET_MAX_PKT_SIZE);
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, MCDE_MAX_DCS_READ);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+
+ set_channel_state_atomic(chnl, CHNLSTATE_IDLE);
+
+ mcde_unlock(__func__, __LINE__);
+
+ return 0;
+}
+
+static void dsi_te_poll_req(struct mcde_chnl_state *chnl)
+{
+ u8 lnk = chnl->port.link;
+ const struct mcde_port *port = &chnl->port;
+
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, false);
+ if (port->ifc == 0)
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, IF1_TE_EN, true);
+ if (port->ifc == 1)
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, IF2_TE_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true);
+ dsi_wfld(lnk, DSI_CMD_MODE_CTL, TE_TIMEOUT, 0x3FF);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, TE_POLLING_EN, true);
+}
+
+static void dsi_te_poll_set_timer(struct mcde_chnl_state *chnl,
+ unsigned int timeout)
+{
+ mod_timer(&chnl->dsi_te_timer,
+ jiffies +
+ msecs_to_jiffies(timeout));
+}
+
+static void dsi_te_timer_function(unsigned long arg)
+{
+ struct mcde_chnl_state *chnl;
+ u8 lnk;
+
+ if (arg >= num_channels) {
+ dev_err(&mcde_dev->dev, "%s invalid arg:%ld\n", __func__, arg);
+ return;
+ }
+
+ chnl = &channels[arg];
+
+ if (mcde_is_enabled && chnl->enabled && chnl->formatter_updated) {
+ lnk = chnl->port.link;
+ /* No TE answer; force stop */
+ dsi_wfld(lnk, DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, true);
+ udelay(20);
+ dsi_wfld(lnk, DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, false);
+ dev_info(&mcde_dev->dev, "DSI%d force stop\n", lnk);
+ dsi_te_poll_set_timer(chnl, DSI_TE_NO_ANSWER_TIMEOUT);
+ } else {
+ dev_info(&mcde_dev->dev, "1:DSI force stop\n");
+ }
+}
+
+static void dsi_te_request(struct mcde_chnl_state *chnl)
+{
+ u8 link = chnl->port.link;
+ u8 virt_id = chnl->port.phy.dsi.virt_id;
+ u32 settings;
+
+ dev_vdbg(&mcde_dev->dev, "Request BTA TE, chnl=%d\n",
+ chnl->id);
+
+ set_channel_state_atomic(chnl, CHNLSTATE_WAIT_TE);
+
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true);
+ dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, true);
+ dsi_wfld(link, DSI_CMD_MODE_CTL, TE_TIMEOUT, 0x3FF);
+ settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(TE_REQ) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(2) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) |
+ DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(DCS_SHORT_WRITE_1);
+ dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings);
+ dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, DCS_CMD_SET_TEAR_ON);
+ dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR,
+ DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(true));
+ dsi_wfld(link, DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EN, true);
+ dsi_wreg(link, DSI_CMD_MODE_STS_CLR,
+ DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(true));
+ dsi_wfld(link, DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EN, true);
+ dsi_wreg(link, DSI_DIRECT_CMD_SEND, true);
+}
+
+/* MCDE channels */
+static struct mcde_chnl_state *_mcde_chnl_get(enum mcde_chnl chnl_id,
+ enum mcde_fifo fifo, const struct mcde_port *port)
+{
+ int i;
+ struct mcde_chnl_state *chnl = NULL;
+ struct mcde_platform_data *pdata = mcde_dev->dev.platform_data;
+
+ static struct mcde_col_transform ycbcr_2_rgb = {
+ /* Note that in MCDE YUV 422 pixels come as VYU pixels */
+ .matrix = {
+ {0xff30, 0x012a, 0xff9c},
+ {0x0000, 0x012a, 0x0204},
+ {0x0199, 0x012a, 0x0000},
+ },
+ .offset = {0x0088, 0xfeeb, 0xff21},
+ };
+
+ static struct mcde_col_transform rgb_2_ycbcr = {
+ .matrix = {
+ {0x0042, 0x0081, 0x0019},
+ {0xffda, 0xffb6, 0x0070},
+ {0x0070, 0xffa2, 0xffee},
+ },
+ .offset = {0x0010, 0x0080, 0x0080},
+ };
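+	/*
+	 * These appear to be the standard limited-range BT.601 coefficients
+	 * in 8.8 fixed point (an assumption based on the values): e.g.
+	 * 0x0042 = 66/256 ~ 0.257 here and 0x012a = 298/256 ~ 1.164 in the
+	 * inverse matrix above, with offsets 0x0010 = 16 and 0x0080 = 128.
+	 */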
+
+ /* Allocate channel */
+ for (i = 0; i < num_channels; i++) {
+ if (chnl_id == channels[i].id)
+ chnl = &channels[i];
+ }
+ if (!chnl) {
+ dev_dbg(&mcde_dev->dev, "Invalid channel, chnl=%d\n", chnl_id);
+ return ERR_PTR(-EINVAL);
+ }
+ if (chnl->reserved) {
+ dev_dbg(&mcde_dev->dev, "Channel in use, chnl=%d\n", chnl_id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ chnl->port = *port;
+ chnl->fifo = fifo;
+ chnl->formatter_updated = false;
+ chnl->ycbcr_2_rgb = ycbcr_2_rgb;
+ chnl->rgb_2_ycbcr = rgb_2_ycbcr;
+
+ chnl->blend_en = true;
+ chnl->blend_ctrl = MCDE_CRA0_BLENDCTRL_SOURCE;
+ chnl->alpha_blend = 0xFF;
+ chnl->rotbuf1 = pdata->rotbuf1;
+ chnl->rotbuf2 = pdata->rotbuf2;
+ chnl->rotbufsize = pdata->rotbufsize;
+
+ _mcde_chnl_apply(chnl);
+ chnl->reserved = true;
+
+ if (chnl->port.type == MCDE_PORTTYPE_DPI) {
+ chnl->clk_dpi = clk_get(&mcde_dev->dev, CLK_DPI);
+ if (chnl->port.phy.dpi.tv_mode)
+ chnl->vcmp_per_field = true;
+ } else if (chnl->port.type == MCDE_PORTTYPE_DSI &&
+ dsi_use_clk_framework) {
+ char dsihs_name[10];
+ char dsilp_name[10];
+
+ sprintf(dsihs_name, "dsihs%d", port->link);
+ sprintf(dsilp_name, "dsilp%d", port->link);
+
+ chnl->clk_dsi_lp = clk_get(&mcde_dev->dev, dsilp_name);
+ chnl->clk_dsi_hs = clk_get(&mcde_dev->dev, dsihs_name);
+ if (port->phy.dsi.lp_freq != clk_round_rate(chnl->clk_dsi_lp,
+ port->phy.dsi.lp_freq))
+ dev_warn(&mcde_dev->dev, "Could not set dsi lp freq"
+ " to %d\n", port->phy.dsi.lp_freq);
+ WARN_ON_ONCE(clk_set_rate(chnl->clk_dsi_lp,
+ port->phy.dsi.lp_freq));
+ if (port->phy.dsi.hs_freq != clk_round_rate(chnl->clk_dsi_hs,
+ port->phy.dsi.hs_freq))
+ dev_warn(&mcde_dev->dev, "Could not set dsi hs freq"
+ " to %d\n", port->phy.dsi.hs_freq);
+ WARN_ON_ONCE(clk_set_rate(chnl->clk_dsi_hs,
+ port->phy.dsi.hs_freq));
+ }
+ return chnl;
+}
+
+static int _mcde_chnl_apply(struct mcde_chnl_state *chnl)
+{
+ bool roten = false;
+ u8 rotdir = 0;
+
+ if (chnl->rotation == MCDE_DISPLAY_ROT_90_CCW) {
+ roten = true;
+ rotdir = MCDE_ROTACONF_ROTDIR_CCW;
+ } else if (chnl->rotation == MCDE_DISPLAY_ROT_90_CW) {
+ roten = true;
+ rotdir = MCDE_ROTACONF_ROTDIR_CW;
+ }
+ /* REVIEW: 180 deg? */
+
+ chnl->regs.bpp = portfmt2bpp(chnl->port.pixel_format);
+ chnl->regs.roten = roten;
+ chnl->regs.rotdir = rotdir;
+ chnl->regs.rotbuf1 = chnl->rotbuf1;
+ chnl->regs.rotbuf2 = chnl->rotbuf2;
+ chnl->regs.rotbufsize = chnl->rotbufsize;
+ chnl->regs.palette_enable = chnl->palette_enable;
+ chnl->regs.map_r = chnl->map_r;
+ chnl->regs.map_g = chnl->map_g;
+ chnl->regs.map_b = chnl->map_b;
+ if (chnl->port.type == MCDE_PORTTYPE_DSI) {
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_MCDECLK;
+ chnl->regs.dsipacking =
+ portfmt2dsipacking(chnl->port.pixel_format);
+ } else if (chnl->port.type == MCDE_PORTTYPE_DPI) {
+ if (chnl->port.phy.dpi.tv_mode) {
+ chnl->regs.internal_clk = false;
+ chnl->regs.bcd = true;
+ if (chnl->id == MCDE_CHNL_A)
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_TV1CLK;
+ else
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_TV2CLK;
+ } else {
+ chnl->regs.internal_clk = true;
+ chnl->regs.clksel = MCDE_CRA1_CLKSEL_CLKPLL72;
+ chnl->regs.cdwin =
+ portfmt2cdwin(chnl->port.pixel_format);
+ chnl->regs.bcd = (chnl->port.phy.dpi.clock_div < 2);
+ if (!chnl->regs.bcd)
+ chnl->regs.pcd =
+ chnl->port.phy.dpi.clock_div - 2;
+ }
+ dpi_video_mode_apply(chnl);
+ }
+
+ chnl->regs.blend_ctrl = chnl->blend_ctrl;
+ chnl->regs.blend_en = chnl->blend_en;
+ chnl->regs.alpha_blend = chnl->alpha_blend;
+
+ chnl->regs.dirty = true;
+
+ dev_vdbg(&mcde_dev->dev, "Channel applied, chnl=%d\n", chnl->id);
+ return 0;
+}
+
+static void setup_channel(struct mcde_chnl_state *chnl)
+{
+ set_channel_state_sync(chnl, CHNLSTATE_SETUP);
+
+ if (chnl->port.type == MCDE_PORTTYPE_DPI && chnl->tv_regs.dirty)
+ update_dpi_registers(chnl->id, &chnl->tv_regs);
+ if ((chnl->id == MCDE_CHNL_A || chnl->id == MCDE_CHNL_B) &&
+ chnl->col_regs.dirty)
+ update_col_registers(chnl->id, &chnl->col_regs);
+ if (chnl->regs.dirty)
+ update_channel_registers(chnl->id, &chnl->regs, &chnl->port,
+ chnl->fifo, &chnl->vmode);
+}
+
+static void chnl_update_continous(struct mcde_chnl_state *chnl,
+ bool tripple_buffer)
+{
+ if (chnl->state == CHNLSTATE_RUNNING) {
+ if (!tripple_buffer)
+ wait_for_vcmp(chnl);
+ return;
+ }
+
+ setup_channel(chnl);
+ if (chnl->port.sync_src == MCDE_SYNCSRC_TE0) {
+ mcde_wfld(MCDE_CRC, SYCEN0, true);
+ } else if (chnl->port.sync_src == MCDE_SYNCSRC_TE1) {
+ mcde_wfld(MCDE_VSCRC1, VSSEL, 1);
+ mcde_wfld(MCDE_CRC, SYCEN1, true);
+ }
+
+ enable_flow(chnl);
+}
+
+static void chnl_update_non_continous(struct mcde_chnl_state *chnl)
+{
+ /* Commit settings to registers */
+ setup_channel(chnl);
+
+ if (chnl->port.type == MCDE_PORTTYPE_DSI) {
+ if (chnl->port.sync_src == MCDE_SYNCSRC_OFF) {
+ if (chnl->port.frame_trig == MCDE_TRIG_SW) {
+ do_softwaretrig(chnl);
+ } else {
+ enable_flow(chnl);
+ disable_flow(chnl);
+ }
+ dev_vdbg(&mcde_dev->dev, "Channel update (no sync), "
+ "chnl=%d\n", chnl->id);
+ } else if (chnl->port.sync_src == MCDE_SYNCSRC_BTA) {
+ if (chnl->power_mode == MCDE_DISPLAY_PM_ON) {
+ dsi_te_request(chnl);
+ } else {
+ if (chnl->port.frame_trig == MCDE_TRIG_SW)
+ do_softwaretrig(chnl);
+ }
+ if (chnl->port.frame_trig == MCDE_TRIG_HW) {
+ /*
+ * During BTA TE the MCDE block will be stalled,
+ * once the TE is received the DMA trig will
+ * happen
+ */
+ enable_flow(chnl);
+ disable_flow(chnl);
+ }
+ }
+ }
+}
+
+static void chnl_update_overlay(struct mcde_chnl_state *chnl,
+ struct mcde_ovly_state *ovly)
+{
+ if (!ovly)
+ return;
+
+ if (ovly->regs.dirty_buf) {
+ if (!chnl->port.update_auto_trig)
+ set_channel_state_sync(chnl, CHNLSTATE_SETUP);
+ update_overlay_registers_on_the_fly(ovly->idx, &ovly->regs);
+ mcde_debugfs_overlay_update(chnl->id, ovly != chnl->ovly0);
+ }
+ if (ovly->regs.dirty) {
+ if (!chnl->port.update_auto_trig)
+ set_channel_state_sync(chnl, CHNLSTATE_SETUP);
+ chnl_ovly_pixel_format_apply(chnl, ovly);
+ update_overlay_registers(ovly->idx, &ovly->regs, &chnl->port,
+ chnl->fifo, chnl->regs.x, chnl->regs.y,
+ chnl->regs.ppl, chnl->regs.lpf, ovly->stride,
+ chnl->vmode.interlaced, chnl->rotation);
+ if (chnl->id == MCDE_CHNL_A || chnl->id == MCDE_CHNL_B)
+ update_col_registers(chnl->id, &chnl->col_regs);
+ }
+}
+
+static int _mcde_chnl_update(struct mcde_chnl_state *chnl,
+ struct mcde_rectangle *update_area,
+ bool tripple_buffer)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ /* TODO: lock & make wait->trig async */
+ if (!chnl->enabled || !update_area
+ || (update_area->w == 0 && update_area->h == 0)) {
+ return -EINVAL;
+ }
+
+ if (chnl->port.update_auto_trig && tripple_buffer)
+ wait_for_vcmp(chnl);
+
+ chnl->regs.x = update_area->x;
+ chnl->regs.y = update_area->y;
+ /* TODO Crop against video_mode.xres and video_mode.yres */
+ chnl->regs.ppl = update_area->w;
+ chnl->regs.lpf = update_area->h;
+ if (chnl->port.type == MCDE_PORTTYPE_DPI &&
+ chnl->port.phy.dpi.tv_mode) {
+ /* subtract border */
+ chnl->regs.ppl -= chnl->tv_regs.dho + chnl->tv_regs.alw;
+ /* subtract double borders, ie. for both fields */
+ chnl->regs.lpf -= 2 * (chnl->tv_regs.dvo + chnl->tv_regs.bsl);
+ } else if (chnl->port.type == MCDE_PORTTYPE_DSI &&
+ chnl->vmode.interlaced)
+ chnl->regs.lpf /= 2;
+
+ chnl_update_overlay(chnl, chnl->ovly0);
+ chnl_update_overlay(chnl, chnl->ovly1);
+
+ if (chnl->port.update_auto_trig)
+ chnl_update_continous(chnl, tripple_buffer);
+ else
+ chnl_update_non_continous(chnl);
+
+ dev_vdbg(&mcde_dev->dev, "Channel updated, chnl=%d\n", chnl->id);
+ mcde_debugfs_channel_update(chnl->id);
+ return 0;
+}
+
+static int _mcde_chnl_enable(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ chnl->enabled = true;
+ return 0;
+}
+
+/* API entry points */
+/* MCDE channels */
+struct mcde_chnl_state *mcde_chnl_get(enum mcde_chnl chnl_id,
+ enum mcde_fifo fifo, const struct mcde_port *port)
+{
+ struct mcde_chnl_state *chnl;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+ chnl = _mcde_chnl_get(chnl_id, fifo, port);
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return chnl;
+}
+
+int mcde_chnl_set_pixel_format(struct mcde_chnl_state *chnl,
+ enum mcde_port_pix_fmt pix_fmt)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+ chnl->port.pixel_format = pix_fmt;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+
+int mcde_chnl_set_palette(struct mcde_chnl_state *chnl,
+ struct mcde_palette_table *palette)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+ if (palette != NULL) {
+ chnl->map_r = palette->map_col_ch0;
+ chnl->map_g = palette->map_col_ch1;
+ chnl->map_b = palette->map_col_ch2;
+ chnl->palette_enable = true;
+ } else {
+ chnl->map_r = NULL;
+ chnl->map_g = NULL;
+ chnl->map_b = NULL;
+ chnl->palette_enable = false;
+ }
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+ return 0;
+}
+
+void mcde_chnl_set_col_convert(struct mcde_chnl_state *chnl,
+ struct mcde_col_transform *transform,
+ enum mcde_col_convert convert)
+{
+ switch (convert) {
+ case MCDE_CONVERT_RGB_2_YCBCR:
+ memcpy(&chnl->rgb_2_ycbcr, transform,
+ sizeof(struct mcde_col_transform));
+ /* force update: */
+ if (chnl->transform == &chnl->rgb_2_ycbcr) {
+ chnl->transform = NULL;
+ chnl->ovly0->dirty = true;
+			if (chnl->ovly1)
+				chnl->ovly1->dirty = true;
+ }
+ break;
+ case MCDE_CONVERT_YCBCR_2_RGB:
+ memcpy(&chnl->ycbcr_2_rgb, transform,
+ sizeof(struct mcde_col_transform));
+ /* force update: */
+ if (chnl->transform == &chnl->ycbcr_2_rgb) {
+ chnl->transform = NULL;
+ chnl->ovly0->dirty = true;
+			if (chnl->ovly1)
+				chnl->ovly1->dirty = true;
+ }
+ break;
+ default:
+ /* Trivial transforms are handled internally */
+ dev_warn(&mcde_dev->dev,
+ "%s: unsupported col convert\n", __func__);
+ break;
+ }
+}
+
+int mcde_chnl_set_video_mode(struct mcde_chnl_state *chnl,
+ struct mcde_video_mode *vmode)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl == NULL || vmode == NULL)
+ return -EINVAL;
+
+ chnl->vmode = *vmode;
+
+ chnl->ovly0->dirty = true;
+ if (chnl->ovly1)
+ chnl->ovly1->dirty = true;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+EXPORT_SYMBOL(mcde_chnl_set_video_mode);
+
+int mcde_chnl_set_rotation(struct mcde_chnl_state *chnl,
+ enum mcde_display_rotation rotation)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ if ((rotation == MCDE_DISPLAY_ROT_90_CW ||
+ rotation == MCDE_DISPLAY_ROT_90_CCW) &&
+ (chnl->id != MCDE_CHNL_A && chnl->id != MCDE_CHNL_B))
+ return -EINVAL;
+
+ chnl->rotation = rotation;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+
+int mcde_chnl_set_power_mode(struct mcde_chnl_state *chnl,
+ enum mcde_display_power_mode power_mode)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ chnl->power_mode = power_mode;
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+
+ return 0;
+}
+
+int mcde_chnl_apply(struct mcde_chnl_state *chnl)
+{
+	int ret;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+ ret = _mcde_chnl_apply(chnl);
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit with ret %d\n", __func__, ret);
+
+ return ret;
+}
+
+int mcde_chnl_update(struct mcde_chnl_state *chnl,
+ struct mcde_rectangle *update_area,
+ bool tripple_buffer)
+{
+ int ret;
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return -EINVAL;
+
+ mcde_lock(__func__, __LINE__);
+ enable_mcde_hw();
+ if (!chnl->formatter_updated)
+ (void)update_channel_static_registers(chnl);
+
+ if (chnl->regs.roten && !chnl->esram_is_enabled) {
+ WARN_ON_ONCE(regulator_enable(regulator_esram_epod));
+ chnl->esram_is_enabled = true;
+ } else if (!chnl->regs.roten && chnl->esram_is_enabled) {
+ WARN_ON_ONCE(regulator_disable(regulator_esram_epod));
+ chnl->esram_is_enabled = false;
+ }
+
+ ret = _mcde_chnl_update(chnl, update_area, tripple_buffer);
+
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit with ret %d\n", __func__, ret);
+
+ return ret;
+}
+
+void mcde_chnl_put(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (chnl->enabled) {
+ stop_channel(chnl);
+ cancel_delayed_work(&hw_timeout_work);
+ disable_mcde_hw(false, true);
+ chnl->enabled = false;
+ }
+
+ chnl->reserved = false;
+ if (chnl->port.type == MCDE_PORTTYPE_DPI) {
+ clk_put(chnl->clk_dpi);
+ if (chnl->port.phy.dpi.tv_mode) {
+ chnl->vcmp_per_field = false;
+ chnl->even_vcmp = false;
+ }
+ } else if (chnl->port.type == MCDE_PORTTYPE_DSI) {
+ if (dsi_use_clk_framework) {
+ clk_put(chnl->clk_dsi_lp);
+ clk_put(chnl->clk_dsi_hs);
+ }
+ }
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+void mcde_chnl_stop_flow(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+ if (mcde_is_enabled && chnl->enabled)
+ stop_channel(chnl);
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+void mcde_chnl_enable(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+ _mcde_chnl_enable(chnl);
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+void mcde_chnl_disable(struct mcde_chnl_state *chnl)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+ cancel_delayed_work(&hw_timeout_work);
+ /* The channel must be stopped before it is disabled */
+ WARN_ON_ONCE(chnl->state == CHNLSTATE_RUNNING);
+ disable_mcde_hw(false, true);
+ chnl->enabled = false;
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__);
+}
+
+/* MCDE overlays */
+struct mcde_ovly_state *mcde_ovly_get(struct mcde_chnl_state *chnl)
+{
+ struct mcde_ovly_state *ovly;
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!chnl->reserved)
+ return ERR_PTR(-EINVAL);
+
+ if (!chnl->ovly0->inuse)
+ ovly = chnl->ovly0;
+ else if (chnl->ovly1 && !chnl->ovly1->inuse)
+ ovly = chnl->ovly1;
+ else
+ ovly = ERR_PTR(-EBUSY);
+
+ if (!IS_ERR(ovly)) {
+ ovly->inuse = true;
+ ovly->paddr = 0;
+ ovly->stride = 0;
+ ovly->pix_fmt = MCDE_OVLYPIXFMT_RGB565;
+ ovly->src_x = 0;
+ ovly->src_y = 0;
+ ovly->dst_x = 0;
+ ovly->dst_y = 0;
+ ovly->dst_z = 0;
+ ovly->w = 0;
+ ovly->h = 0;
+ ovly->alpha_value = 0xFF;
+ ovly->alpha_source = MCDE_OVL1CONF2_BP_PER_PIXEL_ALPHA;
+ ovly->dirty = true;
+ mcde_ovly_apply(ovly);
+ }
+
+ return ovly;
+}
+
+void mcde_ovly_put(struct mcde_ovly_state *ovly)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ if (!ovly->inuse)
+ return;
+ if (ovly->regs.enabled) {
+ ovly->paddr = 0;
+ ovly->dirty = true;
+ mcde_ovly_apply(ovly);/* REVIEW: API call calling API call! */
+ }
+ ovly->inuse = false;
+}
+
+void mcde_ovly_set_source_buf(struct mcde_ovly_state *ovly, u32 paddr)
+{
+ if (!ovly->inuse)
+ return;
+
+ ovly->dirty = paddr == 0 || ovly->paddr == 0;
+ ovly->dirty_buf = true;
+
+ ovly->paddr = paddr;
+}
+
+void mcde_ovly_set_source_info(struct mcde_ovly_state *ovly,
+ u32 stride, enum mcde_ovly_pix_fmt pix_fmt)
+{
+ if (!ovly->inuse)
+ return;
+
+ ovly->stride = stride;
+ ovly->pix_fmt = pix_fmt;
+ ovly->dirty = true;
+}
+
+void mcde_ovly_set_source_area(struct mcde_ovly_state *ovly,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ if (!ovly->inuse)
+ return;
+
+ ovly->src_x = x;
+ ovly->src_y = y;
+ ovly->w = w;
+ ovly->h = h;
+ ovly->dirty = true;
+}
+
+void mcde_ovly_set_dest_pos(struct mcde_ovly_state *ovly, u16 x, u16 y, u8 z)
+{
+ if (!ovly->inuse)
+ return;
+
+ ovly->dst_x = x;
+ ovly->dst_y = y;
+ ovly->dst_z = z;
+ ovly->dirty = true;
+}
+
+void mcde_ovly_apply(struct mcde_ovly_state *ovly)
+{
+ if (!ovly->inuse)
+ return;
+
+ mcde_lock(__func__, __LINE__);
+
+ if (ovly->dirty || ovly->dirty_buf) {
+ ovly->regs.ch_id = ovly->chnl->id;
+ ovly->regs.enabled = ovly->paddr != 0;
+ ovly->regs.baseaddress0 = ovly->paddr;
+ ovly->regs.baseaddress1 =
+ ovly->regs.baseaddress0 + ovly->stride;
+ ovly->regs.dirty_buf = true;
+ ovly->dirty_buf = false;
+ }
+ if (!ovly->dirty) {
+ mcde_unlock(__func__, __LINE__);
+ return;
+ }
+
+ switch (ovly->pix_fmt) {/* REVIEW: Extract to table */
+ case MCDE_OVLYPIXFMT_RGB565:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_RGB565;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = true;
+ break;
+ case MCDE_OVLYPIXFMT_RGBA5551:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_IRGB1555;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = false;
+ break;
+ case MCDE_OVLYPIXFMT_RGBA4444:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_ARGB4444;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = false;
+ break;
+ case MCDE_OVLYPIXFMT_RGB888:
+ ovly->regs.bits_per_pixel = 24;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_RGB888;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = true;
+ break;
+ case MCDE_OVLYPIXFMT_RGBX8888:
+ ovly->regs.bits_per_pixel = 32;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_XRGB8888;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = true;
+ ovly->regs.opq = true;
+ break;
+ case MCDE_OVLYPIXFMT_RGBA8888:
+ ovly->regs.bits_per_pixel = 32;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_ARGB8888;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = false;
+ break;
+ case MCDE_OVLYPIXFMT_YCbCr422:
+ ovly->regs.bits_per_pixel = 16;
+ ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_YCBCR422;
+ ovly->regs.bgr = false;
+ ovly->regs.bebo = false;
+ ovly->regs.opq = true;
+ break;
+ default:
+ break;
+ }
+
+ ovly->regs.ppl = ovly->w;
+ ovly->regs.lpf = ovly->h;
+ ovly->regs.cropx = ovly->src_x;
+ ovly->regs.cropy = ovly->src_y;
+ ovly->regs.xpos = ovly->dst_x;
+ ovly->regs.ypos = ovly->dst_y;
+ ovly->regs.z = ovly->dst_z > 0; /* 0 or 1 */
+ ovly->regs.col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED;
+ ovly->regs.alpha_source = ovly->alpha_source;
+ ovly->regs.alpha_value = ovly->alpha_value;
+
+ ovly->regs.dirty = true;
+ ovly->dirty = false;
+
+ mcde_unlock(__func__, __LINE__);
+
+ dev_vdbg(&mcde_dev->dev, "Overlay applied, idx=%d chnl=%d\n",
+ ovly->idx, ovly->chnl->id);
+}
+
+static int init_clocks_and_power(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct mcde_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdata->regulator_mcde_epod_id) {
+ regulator_mcde_epod = regulator_get(&pdev->dev,
+ pdata->regulator_mcde_epod_id);
+ if (IS_ERR(regulator_mcde_epod)) {
+ ret = PTR_ERR(regulator_mcde_epod);
+ dev_warn(&pdev->dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_mcde_epod_id);
+ regulator_mcde_epod = NULL;
+ return ret;
+ }
+ } else {
+ dev_warn(&pdev->dev, "%s: No mcde regulator id supplied\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (pdata->regulator_esram_epod_id) {
+ regulator_esram_epod = regulator_get(&pdev->dev,
+ pdata->regulator_esram_epod_id);
+ if (IS_ERR(regulator_esram_epod)) {
+ ret = PTR_ERR(regulator_esram_epod);
+ dev_warn(&pdev->dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_esram_epod_id);
+ regulator_esram_epod = NULL;
+ goto regulator_esram_err;
+ }
+ } else {
+ dev_warn(&pdev->dev, "%s: No esram regulator id supplied\n",
+ __func__);
+ }
+
+ if (pdata->regulator_vana_id) {
+ regulator_vana = regulator_get(&pdev->dev,
+ pdata->regulator_vana_id);
+ if (IS_ERR(regulator_vana)) {
+ ret = PTR_ERR(regulator_vana);
+ dev_warn(&pdev->dev,
+ "%s: Failed to get regulator '%s'\n",
+ __func__, pdata->regulator_vana_id);
+ regulator_vana = NULL;
+ goto regulator_vana_err;
+ }
+ } else {
+ dev_dbg(&pdev->dev, "%s: No vana regulator id supplied\n",
+ __func__);
+ }
+
+ if (!dsi_use_clk_framework) {
+ clock_dsi = clk_get(&pdev->dev, pdata->clock_dsi_id);
+ if (IS_ERR(clock_dsi))
+ dev_dbg(&pdev->dev, "%s: Failed to get clock '%s'\n",
+ __func__, pdata->clock_dsi_id);
+
+ clock_dsi_lp = clk_get(&pdev->dev, pdata->clock_dsi_lp_id);
+ if (IS_ERR(clock_dsi_lp))
+ dev_dbg(&pdev->dev, "%s: Failed to get clock '%s'\n",
+ __func__, pdata->clock_dsi_lp_id);
+ }
+
+ clock_mcde = clk_get(&pdev->dev, CLK_MCDE);
+ if (IS_ERR(clock_mcde)) {
+ ret = PTR_ERR(clock_mcde);
+ dev_warn(&pdev->dev, "%s: Failed to get mcde_clk\n", __func__);
+ goto clk_mcde_err;
+ }
+
+ return ret;
+
+clk_mcde_err:
+ if (!dsi_use_clk_framework) {
+ clk_put(clock_dsi_lp);
+ clk_put(clock_dsi);
+ }
+
+ if (regulator_vana)
+ regulator_put(regulator_vana);
+regulator_vana_err:
+ if (regulator_esram_epod)
+ regulator_put(regulator_esram_epod);
+regulator_esram_err:
+ regulator_put(regulator_mcde_epod);
+ return ret;
+}
+
+static void remove_clocks_and_power(struct platform_device *pdev)
+{
+	/* REVIEW: Release only if they exist */
+	/* REVIEW: Make sure MCDE is done before removal */
+ if (!dsi_use_clk_framework) {
+ clk_put(clock_dsi_lp);
+ clk_put(clock_dsi);
+ }
+ clk_put(clock_mcde);
+ if (regulator_vana)
+ regulator_put(regulator_vana);
+ regulator_put(regulator_mcde_epod);
+ regulator_put(regulator_esram_epod);
+}
+
+static int probe_hw(struct platform_device *pdev)
+{
+ int i;
+ int ret;
+ u32 pid;
+ struct resource *res;
+
+ dev_info(&mcde_dev->dev, "Probe HW\n");
+
+ /* Get MCDE HW version */
+ regulator_enable(regulator_mcde_epod);
+ clk_enable(clock_mcde);
+ pid = mcde_rreg(MCDE_PID);
+
+ dev_info(&mcde_dev->dev, "MCDE HW revision 0x%.8X\n", pid);
+
+ clk_disable(clock_mcde);
+ regulator_disable(regulator_mcde_epod);
+
+ switch (pid) {
+ case MCDE_VERSION_3_0_8:
+ num_dsilinks = 3;
+ num_channels = 4;
+ num_overlays = 6;
+ dsi_ifc_is_supported = true;
+ input_fifo_size = 128;
+ output_fifo_ab_size = 640;
+ output_fifo_c0c1_size = 160;
+ dsi_use_clk_framework = false;
+ dev_info(&mcde_dev->dev, "db8500 V2 HW\n");
+ break;
+ case MCDE_VERSION_4_0_4:
+ num_dsilinks = 2;
+ num_channels = 2;
+ num_overlays = 3;
+ input_fifo_size = 80;
+ output_fifo_ab_size = 320;
+ dsi_ifc_is_supported = false;
+ dsi_use_clk_framework = false;
+ dev_info(&mcde_dev->dev, "db5500 V2 HW\n");
+ break;
+ case MCDE_VERSION_4_1_3:
+ num_dsilinks = 3;
+ num_channels = 4;
+ num_overlays = 6;
+ dsi_ifc_is_supported = true;
+ input_fifo_size = 192;
+ output_fifo_ab_size = 640;
+ output_fifo_c0c1_size = 160;
+ dsi_use_clk_framework = false;
+ dev_info(&mcde_dev->dev, "db9540 V1 HW\n");
+ break;
+ case MCDE_VERSION_3_0_5:
+ /* Intentional */
+ case MCDE_VERSION_1_0_4:
+ /* Intentional */
+ default:
+ dev_err(&mcde_dev->dev, "Unsupported HW version\n");
+ ret = -ENOTSUPP;
+ goto unsupported_hw;
+ break;
+ }
+
+	channels = kcalloc(num_channels, sizeof(struct mcde_chnl_state),
+			GFP_KERNEL);
+ if (!channels) {
+ ret = -ENOMEM;
+ goto failed_channels_alloc;
+ }
+
+	overlays = kcalloc(num_overlays, sizeof(struct mcde_ovly_state),
+			GFP_KERNEL);
+ if (!overlays) {
+ ret = -ENOMEM;
+ goto failed_overlays_alloc;
+ }
+
+	dsiio = kcalloc(num_dsilinks, sizeof(*dsiio), GFP_KERNEL);
+ if (!dsiio) {
+ ret = -ENOMEM;
+ goto failed_dsi_alloc;
+ }
+
+ for (i = 0; i < num_dsilinks; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1+i);
+ if (!res) {
+ dev_dbg(&pdev->dev, "No DSI%d io defined\n", i);
+ ret = -EINVAL;
+			/* unmap any DSI links already mapped */
+			goto failed_map_dsi_io;
+ }
+		dsiio[i] = ioremap(res->start, resource_size(res));
+ if (!dsiio[i]) {
+ dev_dbg(&pdev->dev, "MCDE DSI%d iomap failed\n", i);
+ ret = -EINVAL;
+ goto failed_map_dsi_io;
+ }
+ dev_info(&pdev->dev, "MCDE DSI%d iomap: 0x%.8X->0x%.8X\n",
+ i, (u32)res->start, (u32)dsiio[i]);
+ }
+
+ /* Init MCDE */
+ for (i = 0; i < num_overlays; i++)
+ overlays[i].idx = i;
+
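+	/*
+	 * Statically wire overlays to channels; only MCDE v3.0.8 assigns
+	 * the remaining overlays and channels below.
+	 */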
+ channels[0].ovly0 = &overlays[0];
+ channels[0].ovly1 = &overlays[1];
+ channels[1].ovly0 = &overlays[2];
+
+ if (pid == MCDE_VERSION_3_0_8) {
+ channels[1].ovly1 = &overlays[3];
+ channels[2].ovly0 = &overlays[4];
+ channels[3].ovly0 = &overlays[5];
+ }
+
+ mcde_debugfs_create(&mcde_dev->dev);
+ for (i = 0; i < num_channels; i++) {
+ channels[i].id = i;
+
+ channels[i].ovly0->chnl = &channels[i];
+ if (channels[i].ovly1)
+ channels[i].ovly1->chnl = &channels[i];
+
+ init_waitqueue_head(&channels[i].state_waitq);
+ init_waitqueue_head(&channels[i].vcmp_waitq);
+ init_timer(&channels[i].dsi_te_timer);
+ channels[i].dsi_te_timer.function =
+ dsi_te_timer_function;
+ channels[i].dsi_te_timer.data = i;
+
+ mcde_debugfs_channel_create(i, &channels[i]);
+ mcde_debugfs_overlay_create(i, 0);
+ if (channels[i].ovly1)
+ mcde_debugfs_overlay_create(i, 1);
+ }
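+	/*
+	 * Hold APE OPP at 100% while sampling the clock, presumably so
+	 * that clk_get_rate() reports the maximum MCDE clock rate.
+	 */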
+ (void) prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "mcde", 100);
+ mcde_clk_rate = clk_get_rate(clock_mcde);
+	dev_info(&mcde_dev->dev, "MCDE_CLK is %d Hz\n", mcde_clk_rate);
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "mcde");
+
+ return 0;
+
+failed_map_dsi_io:
+ for (i = 0; i < num_dsilinks; i++) {
+ if (dsiio[i])
+ iounmap(dsiio[i]);
+ }
+ kfree(dsiio);
+ dsiio = NULL;
+failed_dsi_alloc:
+ kfree(overlays);
+ overlays = NULL;
+failed_overlays_alloc:
+ kfree(channels);
+ channels = NULL;
+unsupported_hw:
+failed_channels_alloc:
+ num_dsilinks = 0;
+ num_channels = 0;
+ num_overlays = 0;
+ return ret;
+}
+
+static int __devinit mcde_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ struct mcde_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ mcde_dev = pdev;
+
+ /* Hook up irq */
+ mcde_irq = platform_get_irq(pdev, 0);
+ if (mcde_irq <= 0) {
+ dev_dbg(&pdev->dev, "No irq defined\n");
+ ret = -EINVAL;
+ goto failed_irq_get;
+ }
+
+ /* Map I/O */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(&pdev->dev, "No MCDE io defined\n");
+ ret = -EINVAL;
+ goto failed_get_mcde_io;
+ }
+	mcdeio = ioremap(res->start, resource_size(res));
+ if (!mcdeio) {
+ dev_dbg(&pdev->dev, "MCDE iomap failed\n");
+ ret = -EINVAL;
+ goto failed_map_mcde_io;
+ }
+ dev_info(&pdev->dev, "MCDE iomap: 0x%.8X->0x%.8X\n",
+ (u32)res->start, (u32)mcdeio);
+
+ ret = init_clocks_and_power(pdev);
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "%s: init_clocks_and_power failed\n"
+ , __func__);
+ goto failed_init_clocks;
+ }
+
+ INIT_DELAYED_WORK_DEFERRABLE(&hw_timeout_work, work_sleep_function);
+
+ ret = probe_hw(pdev);
+ if (ret)
+ goto failed_probe_hw;
+
+ ret = enable_mcde_hw();
+ if (ret)
+ goto failed_mcde_enable;
+
+ return 0;
+
+failed_mcde_enable:
+failed_probe_hw:
+ remove_clocks_and_power(pdev);
+failed_init_clocks:
+ iounmap(mcdeio);
+failed_map_mcde_io:
+failed_get_mcde_io:
+failed_irq_get:
+ return ret;
+}
+
+static int __devexit mcde_remove(struct platform_device *pdev)
+{
+ struct mcde_chnl_state *chnl = &channels[0];
+
+	for (; chnl < &channels[num_channels]; chnl++) {
+		if (del_timer(&chnl->dsi_te_timer))
+			dev_vdbg(&mcde_dev->dev,
+				"%s dsi te timer was still pending\n",
+				__func__);
+	}
+
+ remove_clocks_and_power(pdev);
+ return 0;
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+static int mcde_resume(struct platform_device *pdev)
+{
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+
+ if (enable_mcde_hw()) {
+ mcde_unlock(__func__, __LINE__);
+ return -EINVAL;
+ }
+
+ mcde_unlock(__func__, __LINE__);
+
+ return 0;
+}
+
+static int mcde_suspend(struct platform_device *pdev, pm_message_t state)
+{
+
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__);
+
+ mcde_lock(__func__, __LINE__);
+
+ cancel_delayed_work(&hw_timeout_work);
+
+ if (!mcde_is_enabled) {
+ mcde_unlock(__func__, __LINE__);
+ return 0;
+ }
+ disable_mcde_hw(true, true);
+
+ mcde_unlock(__func__, __LINE__);
+
+	return 0;
+}
+#endif
+
+static struct platform_driver mcde_driver = {
+ .probe = mcde_probe,
+ .remove = mcde_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)
+ .suspend = mcde_suspend,
+ .resume = mcde_resume,
+#endif
+ .driver = {
+ .name = "mcde",
+ },
+};
+
+int __init mcde_init(void)
+{
+ mutex_init(&mcde_hw_lock);
+ return platform_driver_register(&mcde_driver);
+}
+
+void mcde_exit(void)
+{
+	/* REVIEW: Shut down MCDE? */
+ platform_driver_unregister(&mcde_driver);
+}
diff --git a/drivers/video/mcde/mcde_mod.c b/drivers/video/mcde/mcde_mod.c
new file mode 100644
index 00000000000..60df0d4965f
--- /dev/null
+++ b/drivers/video/mcde/mcde_mod.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson MCDE driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <video/mcde.h>
+#include <video/mcde_fb.h>
+#include <video/mcde_dss.h>
+#include <video/mcde_display.h>
+
+/* Module init */
+
+static int __init mcde_subsystem_init(void)
+{
+ int ret;
+ pr_info("MCDE subsystem init begin\n");
+
+ /* MCDE module init sequence */
+ ret = mcde_init();
+ if (ret)
+ goto mcde_failed;
+ ret = mcde_display_init();
+ if (ret)
+ goto mcde_display_failed;
+ ret = mcde_dss_init();
+ if (ret)
+ goto mcde_dss_failed;
+ ret = mcde_fb_init();
+ if (ret)
+ goto mcde_fb_failed;
+ pr_info("MCDE subsystem init done\n");
+
+	return 0;
+mcde_fb_failed:
+ mcde_dss_exit();
+mcde_dss_failed:
+ mcde_display_exit();
+mcde_display_failed:
+ mcde_exit();
+mcde_failed:
+ return ret;
+}
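+/*
+ * When built in, run at fs_initcall time, presumably so the display
+ * stack is available before its consumers (e.g. the framebuffer)
+ * initialize; as a module, a regular module_init is used.
+ */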
+#ifdef MODULE
+module_init(mcde_subsystem_init);
+#else
+fs_initcall(mcde_subsystem_init);
+#endif
+
+static void __exit mcde_module_exit(void)
+{
+	/* Tear down in reverse order of initialization */
+	mcde_dss_exit();
+	mcde_display_exit();
+	mcde_exit();
+}
+module_exit(mcde_module_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ST-Ericsson MCDE driver");
+
diff --git a/drivers/video/mcde/mcde_regs.h b/drivers/video/mcde/mcde_regs.h
new file mode 100644
index 00000000000..0eece3faea2
--- /dev/null
+++ b/drivers/video/mcde/mcde_regs.h
@@ -0,0 +1,5096 @@
+
+#define MCDE_VAL2REG(__reg, __fld, __val) \
+ (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)
+#define MCDE_REG2VAL(__reg, __fld, __val) \
+ (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT)
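+/*
+ * Example (illustrative): MCDE_VAL2REG(MCDE_CR, MCDEEN, 1) shifts and
+ * masks a field value into register layout,
+ * (1 << 31) & 0x80000000 = 0x80000000, while MCDE_REG2VAL(MCDE_CR,
+ * MCDEEN, reg) extracts the field back out of a register value.
+ */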
+
+#define MCDE_CR 0x00000000
+#define MCDE_CR_IFIFOCTRLEN_SHIFT 15
+#define MCDE_CR_IFIFOCTRLEN_MASK 0x00008000
+#define MCDE_CR_IFIFOCTRLEN(__x) \
+ MCDE_VAL2REG(MCDE_CR, IFIFOCTRLEN, __x)
+#define MCDE_CR_AUTOCLKG_EN_SHIFT 30
+#define MCDE_CR_AUTOCLKG_EN_MASK 0x40000000
+#define MCDE_CR_AUTOCLKG_EN(__x) \
+ MCDE_VAL2REG(MCDE_CR, AUTOCLKG_EN, __x)
+#define MCDE_CR_MCDEEN_SHIFT 31
+#define MCDE_CR_MCDEEN_MASK 0x80000000
+#define MCDE_CR_MCDEEN(__x) \
+ MCDE_VAL2REG(MCDE_CR, MCDEEN, __x)
+#define MCDE_CONF0 0x00000004
+#define MCDE_CONF0_SYNCMUX0_SHIFT 0
+#define MCDE_CONF0_SYNCMUX0_MASK 0x00000001
+#define MCDE_CONF0_SYNCMUX0(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX0, __x)
+#define MCDE_CONF0_SYNCMUX1_SHIFT 1
+#define MCDE_CONF0_SYNCMUX1_MASK 0x00000002
+#define MCDE_CONF0_SYNCMUX1(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX1, __x)
+#define MCDE_CONF0_SYNCMUX2_SHIFT 2
+#define MCDE_CONF0_SYNCMUX2_MASK 0x00000004
+#define MCDE_CONF0_SYNCMUX2(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX2, __x)
+#define MCDE_CONF0_SYNCMUX3_SHIFT 3
+#define MCDE_CONF0_SYNCMUX3_MASK 0x00000008
+#define MCDE_CONF0_SYNCMUX3(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX3, __x)
+#define MCDE_CONF0_SYNCMUX4_SHIFT 4
+#define MCDE_CONF0_SYNCMUX4_MASK 0x00000010
+#define MCDE_CONF0_SYNCMUX4(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX4, __x)
+#define MCDE_CONF0_SYNCMUX5_SHIFT 5
+#define MCDE_CONF0_SYNCMUX5_MASK 0x00000020
+#define MCDE_CONF0_SYNCMUX5(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX5, __x)
+#define MCDE_CONF0_SYNCMUX6_SHIFT 6
+#define MCDE_CONF0_SYNCMUX6_MASK 0x00000040
+#define MCDE_CONF0_SYNCMUX6(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX6, __x)
+#define MCDE_CONF0_SYNCMUX7_SHIFT 7
+#define MCDE_CONF0_SYNCMUX7_MASK 0x00000080
+#define MCDE_CONF0_SYNCMUX7(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, SYNCMUX7, __x)
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT 12
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_MASK 0x00007000
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, IFIFOCTRLWTRMRKLVL, __x)
+#define MCDE_CONF0_OUTMUX0_SHIFT 16
+#define MCDE_CONF0_OUTMUX0_MASK 0x00070000
+#define MCDE_CONF0_OUTMUX0(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX0, __x)
+#define MCDE_CONF0_OUTMUX1_SHIFT 19
+#define MCDE_CONF0_OUTMUX1_MASK 0x00380000
+#define MCDE_CONF0_OUTMUX1(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX1, __x)
+#define MCDE_CONF0_OUTMUX2_SHIFT 22
+#define MCDE_CONF0_OUTMUX2_MASK 0x01C00000
+#define MCDE_CONF0_OUTMUX2(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX2, __x)
+#define MCDE_CONF0_OUTMUX3_SHIFT 25
+#define MCDE_CONF0_OUTMUX3_MASK 0x0E000000
+#define MCDE_CONF0_OUTMUX3(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX3, __x)
+#define MCDE_CONF0_OUTMUX4_SHIFT 28
+#define MCDE_CONF0_OUTMUX4_MASK 0x70000000
+#define MCDE_CONF0_OUTMUX4(__x) \
+ MCDE_VAL2REG(MCDE_CONF0, OUTMUX4, __x)
+#define MCDE_SSP 0x00000008
+#define MCDE_SSP_SSPDATA_SHIFT 0
+#define MCDE_SSP_SSPDATA_MASK 0x000000FF
+#define MCDE_SSP_SSPDATA(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPDATA, __x)
+#define MCDE_SSP_SSPCMD_SHIFT 8
+#define MCDE_SSP_SSPCMD_MASK 0x00000100
+#define MCDE_SSP_SSPCMD_DATA 0
+#define MCDE_SSP_SSPCMD_COMMAND 1
+#define MCDE_SSP_SSPCMD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPCMD, MCDE_SSP_SSPCMD_##__x)
+#define MCDE_SSP_SSPCMD(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPCMD, __x)
+#define MCDE_SSP_SSPEN_SHIFT 16
+#define MCDE_SSP_SSPEN_MASK 0x00010000
+#define MCDE_SSP_SSPEN(__x) \
+ MCDE_VAL2REG(MCDE_SSP, SSPEN, __x)
+#define MCDE_AIS 0x00000100
+#define MCDE_AIS_MCDEPPI_SHIFT 0
+#define MCDE_AIS_MCDEPPI_MASK 0x00000001
+#define MCDE_AIS_MCDEPPI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDEPPI, __x)
+#define MCDE_AIS_MCDEOVLI_SHIFT 1
+#define MCDE_AIS_MCDEOVLI_MASK 0x00000002
+#define MCDE_AIS_MCDEOVLI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDEOVLI, __x)
+#define MCDE_AIS_MCDECHNLI_SHIFT 2
+#define MCDE_AIS_MCDECHNLI_MASK 0x00000004
+#define MCDE_AIS_MCDECHNLI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDECHNLI, __x)
+#define MCDE_AIS_MCDEERRI_SHIFT 3
+#define MCDE_AIS_MCDEERRI_MASK 0x00000008
+#define MCDE_AIS_MCDEERRI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, MCDEERRI, __x)
+#define MCDE_AIS_DSI0AI_SHIFT 4
+#define MCDE_AIS_DSI0AI_MASK 0x00000010
+#define MCDE_AIS_DSI0AI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, DSI0AI, __x)
+#define MCDE_AIS_DSI1AI_SHIFT 5
+#define MCDE_AIS_DSI1AI_MASK 0x00000020
+#define MCDE_AIS_DSI1AI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, DSI1AI, __x)
+#define MCDE_AIS_DSI2AI_SHIFT 6
+#define MCDE_AIS_DSI2AI_MASK 0x00000040
+#define MCDE_AIS_DSI2AI(__x) \
+ MCDE_VAL2REG(MCDE_AIS, DSI2AI, __x)
+#define MCDE_IMSCPP 0x00000104
+#define MCDE_IMSCPP_VCMPAIM_SHIFT 0
+#define MCDE_IMSCPP_VCMPAIM_MASK 0x00000001
+#define MCDE_IMSCPP_VCMPAIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPAIM, __x)
+#define MCDE_IMSCPP_VCMPBIM_SHIFT 1
+#define MCDE_IMSCPP_VCMPBIM_MASK 0x00000002
+#define MCDE_IMSCPP_VCMPBIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPBIM, __x)
+#define MCDE_IMSCPP_VSCC0IM_SHIFT 2
+#define MCDE_IMSCPP_VSCC0IM_MASK 0x00000004
+#define MCDE_IMSCPP_VSCC0IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VSCC0IM, __x)
+#define MCDE_IMSCPP_VSCC1IM_SHIFT 3
+#define MCDE_IMSCPP_VSCC1IM_MASK 0x00000008
+#define MCDE_IMSCPP_VSCC1IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VSCC1IM, __x)
+#define MCDE_IMSCPP_VCMPC0IM_SHIFT 4
+#define MCDE_IMSCPP_VCMPC0IM_MASK 0x00000010
+#define MCDE_IMSCPP_VCMPC0IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPC0IM, __x)
+#define MCDE_IMSCPP_VCMPC1IM_SHIFT 5
+#define MCDE_IMSCPP_VCMPC1IM_MASK 0x00000020
+#define MCDE_IMSCPP_VCMPC1IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, VCMPC1IM, __x)
+#define MCDE_IMSCPP_ROTFDIM_B_SHIFT 6
+#define MCDE_IMSCPP_ROTFDIM_B_MASK 0x00000040
+#define MCDE_IMSCPP_ROTFDIM_B(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, ROTFDIM_B, __x)
+#define MCDE_IMSCPP_ROTFDIM_A_SHIFT 7
+#define MCDE_IMSCPP_ROTFDIM_A_MASK 0x00000080
+#define MCDE_IMSCPP_ROTFDIM_A(__x) \
+ MCDE_VAL2REG(MCDE_IMSCPP, ROTFDIM_A, __x)
+#define MCDE_IMSCOVL 0x00000108
+#define MCDE_IMSCOVL_OVLRDIM_SHIFT 0
+#define MCDE_IMSCOVL_OVLRDIM_MASK 0x0000FFFF
+#define MCDE_IMSCOVL_OVLRDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCOVL, OVLRDIM, __x)
+#define MCDE_IMSCOVL_OVLFDIM_SHIFT 16
+#define MCDE_IMSCOVL_OVLFDIM_MASK 0xFFFF0000
+#define MCDE_IMSCOVL_OVLFDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCOVL, OVLFDIM, __x)
+#define MCDE_IMSCCHNL 0x0000010C
+#define MCDE_IMSCCHNL_CHNLRDIM_SHIFT 0
+#define MCDE_IMSCCHNL_CHNLRDIM_MASK 0x0000FFFF
+#define MCDE_IMSCCHNL_CHNLRDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCCHNL, CHNLRDIM, __x)
+#define MCDE_IMSCCHNL_CHNLAIM_SHIFT 16
+#define MCDE_IMSCCHNL_CHNLAIM_MASK 0xFFFF0000
+#define MCDE_IMSCCHNL_CHNLAIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCCHNL, CHNLAIM, __x)
+#define MCDE_IMSCERR 0x00000110
+#define MCDE_IMSCERR_FUAIM_SHIFT 0
+#define MCDE_IMSCERR_FUAIM_MASK 0x00000001
+#define MCDE_IMSCERR_FUAIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUAIM, __x)
+#define MCDE_IMSCERR_FUBIM_SHIFT 1
+#define MCDE_IMSCERR_FUBIM_MASK 0x00000002
+#define MCDE_IMSCERR_FUBIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUBIM, __x)
+#define MCDE_IMSCERR_SCHBLCKDIM_SHIFT 2
+#define MCDE_IMSCERR_SCHBLCKDIM_MASK 0x00000004
+#define MCDE_IMSCERR_SCHBLCKDIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, SCHBLCKDIM, __x)
+#define MCDE_IMSCERR_ROTAFEIM_WRITE_SHIFT 3
+#define MCDE_IMSCERR_ROTAFEIM_WRITE_MASK 0x00000008
+#define MCDE_IMSCERR_ROTAFEIM_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTAFEIM_WRITE, __x)
+#define MCDE_IMSCERR_ROTAFEIM_READ_SHIFT 4
+#define MCDE_IMSCERR_ROTAFEIM_READ_MASK 0x00000010
+#define MCDE_IMSCERR_ROTAFEIM_READ(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTAFEIM_READ, __x)
+#define MCDE_IMSCERR_ROTBFEIM_WRITE_SHIFT 5
+#define MCDE_IMSCERR_ROTBFEIM_WRITE_MASK 0x00000020
+#define MCDE_IMSCERR_ROTBFEIM_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTBFEIM_WRITE, __x)
+#define MCDE_IMSCERR_ROTBFEIM_READ_SHIFT 6
+#define MCDE_IMSCERR_ROTBFEIM_READ_MASK 0x00000040
+#define MCDE_IMSCERR_ROTBFEIM_READ(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, ROTBFEIM_READ, __x)
+#define MCDE_IMSCERR_FUC0IM_SHIFT 7
+#define MCDE_IMSCERR_FUC0IM_MASK 0x00000080
+#define MCDE_IMSCERR_FUC0IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUC0IM, __x)
+#define MCDE_IMSCERR_FUC1IM_SHIFT 8
+#define MCDE_IMSCERR_FUC1IM_MASK 0x00000100
+#define MCDE_IMSCERR_FUC1IM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, FUC1IM, __x)
+#define MCDE_IMSCERR_OVLFERRIM_SHIFT 16
+#define MCDE_IMSCERR_OVLFERRIM_MASK 0xFFFF0000
+#define MCDE_IMSCERR_OVLFERRIM(__x) \
+ MCDE_VAL2REG(MCDE_IMSCERR, OVLFERRIM, __x)
+#define MCDE_RISPP 0x00000114
+#define MCDE_RISPP_VCMPARIS_SHIFT 0
+#define MCDE_RISPP_VCMPARIS_MASK 0x00000001
+#define MCDE_RISPP_VCMPARIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPARIS, __x)
+#define MCDE_RISPP_VCMPBRIS_SHIFT 1
+#define MCDE_RISPP_VCMPBRIS_MASK 0x00000002
+#define MCDE_RISPP_VCMPBRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPBRIS, __x)
+#define MCDE_RISPP_VSCC0RIS_SHIFT 2
+#define MCDE_RISPP_VSCC0RIS_MASK 0x00000004
+#define MCDE_RISPP_VSCC0RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VSCC0RIS, __x)
+#define MCDE_RISPP_VSCC1RIS_SHIFT 3
+#define MCDE_RISPP_VSCC1RIS_MASK 0x00000008
+#define MCDE_RISPP_VSCC1RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VSCC1RIS, __x)
+#define MCDE_RISPP_VCMPC0RIS_SHIFT 4
+#define MCDE_RISPP_VCMPC0RIS_MASK 0x00000010
+#define MCDE_RISPP_VCMPC0RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPC0RIS, __x)
+#define MCDE_RISPP_VCMPC1RIS_SHIFT 5
+#define MCDE_RISPP_VCMPC1RIS_MASK 0x00000020
+#define MCDE_RISPP_VCMPC1RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, VCMPC1RIS, __x)
+#define MCDE_RISPP_ROTFDRIS_B_SHIFT 6
+#define MCDE_RISPP_ROTFDRIS_B_MASK 0x00000040
+#define MCDE_RISPP_ROTFDRIS_B(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, ROTFDRIS_B, __x)
+#define MCDE_RISPP_ROTFDRIS_A_SHIFT 7
+#define MCDE_RISPP_ROTFDRIS_A_MASK 0x00000080
+#define MCDE_RISPP_ROTFDRIS_A(__x) \
+ MCDE_VAL2REG(MCDE_RISPP, ROTFDRIS_A, __x)
+#define MCDE_RISOVL 0x00000118
+#define MCDE_RISOVL_OVLRDRIS_SHIFT 0
+#define MCDE_RISOVL_OVLRDRIS_MASK 0x0000FFFF
+#define MCDE_RISOVL_OVLRDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISOVL, OVLRDRIS, __x)
+#define MCDE_RISOVL_OVLFDRIS_SHIFT 16
+#define MCDE_RISOVL_OVLFDRIS_MASK 0xFFFF0000
+#define MCDE_RISOVL_OVLFDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISOVL, OVLFDRIS, __x)
+#define MCDE_RISCHNL 0x0000011C
+#define MCDE_RISCHNL_CHNLRDRIS_SHIFT 0
+#define MCDE_RISCHNL_CHNLRDRIS_MASK 0x0000FFFF
+#define MCDE_RISCHNL_CHNLRDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISCHNL, CHNLRDRIS, __x)
+#define MCDE_RISCHNL_CHNLARIS_SHIFT 16
+#define MCDE_RISCHNL_CHNLARIS_MASK 0xFFFF0000
+#define MCDE_RISCHNL_CHNLARIS(__x) \
+ MCDE_VAL2REG(MCDE_RISCHNL, CHNLARIS, __x)
+#define MCDE_RISERR 0x00000120
+#define MCDE_RISERR_FUARIS_SHIFT 0
+#define MCDE_RISERR_FUARIS_MASK 0x00000001
+#define MCDE_RISERR_FUARIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUARIS, __x)
+#define MCDE_RISERR_FUBRIS_SHIFT 1
+#define MCDE_RISERR_FUBRIS_MASK 0x00000002
+#define MCDE_RISERR_FUBRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUBRIS, __x)
+#define MCDE_RISERR_SCHBLCKDRIS_SHIFT 2
+#define MCDE_RISERR_SCHBLCKDRIS_MASK 0x00000004
+#define MCDE_RISERR_SCHBLCKDRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, SCHBLCKDRIS, __x)
+#define MCDE_RISERR_ROTAFERIS_WRITE_SHIFT 3
+#define MCDE_RISERR_ROTAFERIS_WRITE_MASK 0x00000008
+#define MCDE_RISERR_ROTAFERIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTAFERIS_WRITE, __x)
+#define MCDE_RISERR_ROTAFERIS_READ_SHIFT 4
+#define MCDE_RISERR_ROTAFERIS_READ_MASK 0x00000010
+#define MCDE_RISERR_ROTAFERIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTAFERIS_READ, __x)
+#define MCDE_RISERR_ROTBFERIS_WRITE_SHIFT 5
+#define MCDE_RISERR_ROTBFERIS_WRITE_MASK 0x00000020
+#define MCDE_RISERR_ROTBFERIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTBFERIS_WRITE, __x)
+#define MCDE_RISERR_ROTBFERIS_READ_SHIFT 6
+#define MCDE_RISERR_ROTBFERIS_READ_MASK 0x00000040
+#define MCDE_RISERR_ROTBFERIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, ROTBFERIS_READ, __x)
+#define MCDE_RISERR_FUC0RIS_SHIFT 7
+#define MCDE_RISERR_FUC0RIS_MASK 0x00000080
+#define MCDE_RISERR_FUC0RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUC0RIS, __x)
+#define MCDE_RISERR_FUC1RIS_SHIFT 8
+#define MCDE_RISERR_FUC1RIS_MASK 0x00000100
+#define MCDE_RISERR_FUC1RIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, FUC1RIS, __x)
+#define MCDE_RISERR_OVLFERRRIS_SHIFT 16
+#define MCDE_RISERR_OVLFERRRIS_MASK 0xFFFF0000
+#define MCDE_RISERR_OVLFERRRIS(__x) \
+ MCDE_VAL2REG(MCDE_RISERR, OVLFERRRIS, __x)
+#define MCDE_MISPP 0x00000124
+#define MCDE_MISPP_VCMPAMIS_SHIFT 0
+#define MCDE_MISPP_VCMPAMIS_MASK 0x00000001
+#define MCDE_MISPP_VCMPAMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPAMIS, __x)
+#define MCDE_MISPP_VCMPBMIS_SHIFT 1
+#define MCDE_MISPP_VCMPBMIS_MASK 0x00000002
+#define MCDE_MISPP_VCMPBMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPBMIS, __x)
+#define MCDE_MISPP_VSCC0MIS_SHIFT 2
+#define MCDE_MISPP_VSCC0MIS_MASK 0x00000004
+#define MCDE_MISPP_VSCC0MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VSCC0MIS, __x)
+#define MCDE_MISPP_VSCC1MIS_SHIFT 3
+#define MCDE_MISPP_VSCC1MIS_MASK 0x00000008
+#define MCDE_MISPP_VSCC1MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VSCC1MIS, __x)
+#define MCDE_MISPP_VCMPC0MIS_SHIFT 4
+#define MCDE_MISPP_VCMPC0MIS_MASK 0x00000010
+#define MCDE_MISPP_VCMPC0MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPC0MIS, __x)
+#define MCDE_MISPP_VCMPC1MIS_SHIFT 5
+#define MCDE_MISPP_VCMPC1MIS_MASK 0x00000020
+#define MCDE_MISPP_VCMPC1MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, VCMPC1MIS, __x)
+#define MCDE_MISPP_ROTFDMIS_A_SHIFT 6
+#define MCDE_MISPP_ROTFDMIS_A_MASK 0x00000040
+#define MCDE_MISPP_ROTFDMIS_A(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, ROTFDMIS_A, __x)
+#define MCDE_MISPP_ROTFDMIS_B_SHIFT 7
+#define MCDE_MISPP_ROTFDMIS_B_MASK 0x00000080
+#define MCDE_MISPP_ROTFDMIS_B(__x) \
+ MCDE_VAL2REG(MCDE_MISPP, ROTFDMIS_B, __x)
+#define MCDE_MISOVL 0x00000128
+#define MCDE_MISOVL_OVLRDMIS_SHIFT 0
+#define MCDE_MISOVL_OVLRDMIS_MASK 0x0000FFFF
+#define MCDE_MISOVL_OVLRDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISOVL, OVLRDMIS, __x)
+#define MCDE_MISOVL_OVLFDMIS_SHIFT 16
+#define MCDE_MISOVL_OVLFDMIS_MASK 0xFFFF0000
+#define MCDE_MISOVL_OVLFDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISOVL, OVLFDMIS, __x)
+#define MCDE_MISCHNL 0x0000012C
+#define MCDE_MISCHNL_CHNLRDMIS_SHIFT 0
+#define MCDE_MISCHNL_CHNLRDMIS_MASK 0x0000FFFF
+#define MCDE_MISCHNL_CHNLRDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISCHNL, CHNLRDMIS, __x)
+#define MCDE_MISCHNL_CHNLAMIS_SHIFT 16
+#define MCDE_MISCHNL_CHNLAMIS_MASK 0xFFFF0000
+#define MCDE_MISCHNL_CHNLAMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISCHNL, CHNLAMIS, __x)
+#define MCDE_MISERR 0x00000130
+#define MCDE_MISERR_FUAMIS_SHIFT 0
+#define MCDE_MISERR_FUAMIS_MASK 0x00000001
+#define MCDE_MISERR_FUAMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUAMIS, __x)
+#define MCDE_MISERR_FUBMIS_SHIFT 1
+#define MCDE_MISERR_FUBMIS_MASK 0x00000002
+#define MCDE_MISERR_FUBMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUBMIS, __x)
+#define MCDE_MISERR_SCHBLCKDMIS_SHIFT 2
+#define MCDE_MISERR_SCHBLCKDMIS_MASK 0x00000004
+#define MCDE_MISERR_SCHBLCKDMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, SCHBLCKDMIS, __x)
+#define MCDE_MISERR_ROTAFEMIS_WRITE_SHIFT 3
+#define MCDE_MISERR_ROTAFEMIS_WRITE_MASK 0x00000008
+#define MCDE_MISERR_ROTAFEMIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTAFEMIS_WRITE, __x)
+#define MCDE_MISERR_ROTAFEMIS_READ_SHIFT 4
+#define MCDE_MISERR_ROTAFEMIS_READ_MASK 0x00000010
+#define MCDE_MISERR_ROTAFEMIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTAFEMIS_READ, __x)
+#define MCDE_MISERR_ROTBFEMIS_WRITE_SHIFT 5
+#define MCDE_MISERR_ROTBFEMIS_WRITE_MASK 0x00000020
+#define MCDE_MISERR_ROTBFEMIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTBFEMIS_WRITE, __x)
+#define MCDE_MISERR_ROTBFEMIS_READ_SHIFT 6
+#define MCDE_MISERR_ROTBFEMIS_READ_MASK 0x00000040
+#define MCDE_MISERR_ROTBFEMIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, ROTBFEMIS_READ, __x)
+#define MCDE_MISERR_FUC0MIS_SHIFT 7
+#define MCDE_MISERR_FUC0MIS_MASK 0x00000080
+#define MCDE_MISERR_FUC0MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUC0MIS, __x)
+#define MCDE_MISERR_FUC1MIS_SHIFT 8
+#define MCDE_MISERR_FUC1MIS_MASK 0x00000100
+#define MCDE_MISERR_FUC1MIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, FUC1MIS, __x)
+#define MCDE_MISERR_OVLFERMIS_SHIFT 16
+#define MCDE_MISERR_OVLFERMIS_MASK 0xFFFF0000
+#define MCDE_MISERR_OVLFERMIS(__x) \
+ MCDE_VAL2REG(MCDE_MISERR, OVLFERMIS, __x)
+#define MCDE_SISPP 0x00000134
+#define MCDE_SISPP_VCMPASIS_SHIFT 0
+#define MCDE_SISPP_VCMPASIS_MASK 0x00000001
+#define MCDE_SISPP_VCMPASIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPASIS, __x)
+#define MCDE_SISPP_VCMPBSIS_SHIFT 1
+#define MCDE_SISPP_VCMPBSIS_MASK 0x00000002
+#define MCDE_SISPP_VCMPBSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPBSIS, __x)
+#define MCDE_SISPP_VSCC0SIS_SHIFT 2
+#define MCDE_SISPP_VSCC0SIS_MASK 0x00000004
+#define MCDE_SISPP_VSCC0SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VSCC0SIS, __x)
+#define MCDE_SISPP_VSCC1SIS_SHIFT 3
+#define MCDE_SISPP_VSCC1SIS_MASK 0x00000008
+#define MCDE_SISPP_VSCC1SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VSCC1SIS, __x)
+#define MCDE_SISPP_VCMPC0SIS_SHIFT 4
+#define MCDE_SISPP_VCMPC0SIS_MASK 0x00000010
+#define MCDE_SISPP_VCMPC0SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPC0SIS, __x)
+#define MCDE_SISPP_VCMPC1SIS_SHIFT 5
+#define MCDE_SISPP_VCMPC1SIS_MASK 0x00000020
+#define MCDE_SISPP_VCMPC1SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, VCMPC1SIS, __x)
+#define MCDE_SISPP_ROTFDSIS_A_SHIFT 6
+#define MCDE_SISPP_ROTFDSIS_A_MASK 0x00000040
+#define MCDE_SISPP_ROTFDSIS_A(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, ROTFDSIS_A, __x)
+#define MCDE_SISPP_ROTFDSIS_B_SHIFT 7
+#define MCDE_SISPP_ROTFDSIS_B_MASK 0x00000080
+#define MCDE_SISPP_ROTFDSIS_B(__x) \
+ MCDE_VAL2REG(MCDE_SISPP, ROTFDSIS_B, __x)
+#define MCDE_SISOVL 0x00000138
+#define MCDE_SISOVL_OVLRDSIS_SHIFT 0
+#define MCDE_SISOVL_OVLRDSIS_MASK 0x0000FFFF
+#define MCDE_SISOVL_OVLRDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISOVL, OVLRDSIS, __x)
+#define MCDE_SISOVL_OVLFDSIS_SHIFT 16
+#define MCDE_SISOVL_OVLFDSIS_MASK 0xFFFF0000
+#define MCDE_SISOVL_OVLFDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISOVL, OVLFDSIS, __x)
+#define MCDE_SISCHNL 0x0000013C
+#define MCDE_SISCHNL_CHNLRDSIS_SHIFT 0
+#define MCDE_SISCHNL_CHNLRDSIS_MASK 0x0000FFFF
+#define MCDE_SISCHNL_CHNLRDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISCHNL, CHNLRDSIS, __x)
+#define MCDE_SISCHNL_CHNLASIS_SHIFT 16
+#define MCDE_SISCHNL_CHNLASIS_MASK 0xFFFF0000
+#define MCDE_SISCHNL_CHNLASIS(__x) \
+ MCDE_VAL2REG(MCDE_SISCHNL, CHNLASIS, __x)
+#define MCDE_SISERR 0x00000140
+#define MCDE_SISERR_FUASIS_SHIFT 0
+#define MCDE_SISERR_FUASIS_MASK 0x00000001
+#define MCDE_SISERR_FUASIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUASIS, __x)
+#define MCDE_SISERR_FUBSIS_SHIFT 1
+#define MCDE_SISERR_FUBSIS_MASK 0x00000002
+#define MCDE_SISERR_FUBSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUBSIS, __x)
+#define MCDE_SISERR_SCHBLCKDSIS_SHIFT 2
+#define MCDE_SISERR_SCHBLCKDSIS_MASK 0x00000004
+#define MCDE_SISERR_SCHBLCKDSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, SCHBLCKDSIS, __x)
+#define MCDE_SISERR_ROTAFESIS_WRITE_SHIFT 3
+#define MCDE_SISERR_ROTAFESIS_WRITE_MASK 0x00000008
+#define MCDE_SISERR_ROTAFESIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTAFESIS_WRITE, __x)
+#define MCDE_SISERR_ROTAFESIS_READ_SHIFT 4
+#define MCDE_SISERR_ROTAFESIS_READ_MASK 0x00000010
+#define MCDE_SISERR_ROTAFESIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTAFESIS_READ, __x)
+#define MCDE_SISERR_ROTBFESIS_WRITE_SHIFT 5
+#define MCDE_SISERR_ROTBFESIS_WRITE_MASK 0x00000020
+#define MCDE_SISERR_ROTBFESIS_WRITE(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTBFESIS_WRITE, __x)
+#define MCDE_SISERR_ROTBFESIS_READ_SHIFT 6
+#define MCDE_SISERR_ROTBFESIS_READ_MASK 0x00000040
+#define MCDE_SISERR_ROTBFESIS_READ(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, ROTBFESIS_READ, __x)
+#define MCDE_SISERR_FUC0SIS_SHIFT 7
+#define MCDE_SISERR_FUC0SIS_MASK 0x00000080
+#define MCDE_SISERR_FUC0SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUC0SIS, __x)
+#define MCDE_SISERR_FUC1SIS_SHIFT 8
+#define MCDE_SISERR_FUC1SIS_MASK 0x00000100
+#define MCDE_SISERR_FUC1SIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, FUC1SIS, __x)
+#define MCDE_SISERR_OVLFERSIS_SHIFT 16
+#define MCDE_SISERR_OVLFERSIS_MASK 0xFFFF0000
+#define MCDE_SISERR_OVLFERSIS(__x) \
+ MCDE_VAL2REG(MCDE_SISERR, OVLFERSIS, __x)
+#define MCDE_PID 0x000001FC
+#define MCDE_PID_METALFIX_VERSION_SHIFT 0
+#define MCDE_PID_METALFIX_VERSION_MASK 0x000000FF
+#define MCDE_PID_METALFIX_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, METALFIX_VERSION, __x)
+#define MCDE_PID_DEVELOPMENT_VERSION_SHIFT 8
+#define MCDE_PID_DEVELOPMENT_VERSION_MASK 0x0000FF00
+#define MCDE_PID_DEVELOPMENT_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, DEVELOPMENT_VERSION, __x)
+#define MCDE_PID_MINOR_VERSION_SHIFT 16
+#define MCDE_PID_MINOR_VERSION_MASK 0x00FF0000
+#define MCDE_PID_MINOR_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, MINOR_VERSION, __x)
+#define MCDE_PID_MAJOR_VERSION_SHIFT 24
+#define MCDE_PID_MAJOR_VERSION_MASK 0xFF000000
+#define MCDE_PID_MAJOR_VERSION(__x) \
+ MCDE_VAL2REG(MCDE_PID, MAJOR_VERSION, __x)
+#define MCDE_EXTSRC0A0 0x00000200
+#define MCDE_EXTSRC0A0_GROUPOFFSET 0x20
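+/*
+ * External source registers repeat every GROUPOFFSET bytes, so the
+ * register for source n is MCDE_EXTSRC0A0 + n * MCDE_EXTSRC0A0_GROUPOFFSET,
+ * e.g. MCDE_EXTSRC1A0 = 0x200 + 0x20 = 0x220.
+ */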
+#define MCDE_EXTSRC0A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC0A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC0A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC1A0 0x00000220
+#define MCDE_EXTSRC1A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC1A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC1A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC2A0 0x00000240
+#define MCDE_EXTSRC2A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC2A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC2A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC3A0 0x00000260
+#define MCDE_EXTSRC3A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC3A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC3A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC4A0 0x00000280
+#define MCDE_EXTSRC4A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC4A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC4A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC5A0 0x000002A0
+#define MCDE_EXTSRC5A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC5A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC5A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC6A0 0x000002C0
+#define MCDE_EXTSRC6A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC6A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC6A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC7A0 0x000002E0
+#define MCDE_EXTSRC7A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC7A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC7A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC8A0 0x00000300
+#define MCDE_EXTSRC8A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC8A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC8A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC9A0 0x00000320
+#define MCDE_EXTSRC9A0_BASEADDRESS0_SHIFT 3
+#define MCDE_EXTSRC9A0_BASEADDRESS0_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC9A0_BASEADDRESS0(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9A0, BASEADDRESS0, __x)
+#define MCDE_EXTSRC0A1 0x00000204
+#define MCDE_EXTSRC0A1_GROUPOFFSET 0x20
+#define MCDE_EXTSRC0A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC0A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC0A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC1A1 0x00000224
+#define MCDE_EXTSRC1A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC1A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC1A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC2A1 0x00000244
+#define MCDE_EXTSRC2A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC2A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC2A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC3A1 0x00000264
+#define MCDE_EXTSRC3A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC3A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC3A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC4A1 0x00000284
+#define MCDE_EXTSRC4A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC4A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC4A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC5A1 0x000002A4
+#define MCDE_EXTSRC5A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC5A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC5A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC6A1 0x000002C4
+#define MCDE_EXTSRC6A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC6A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC6A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC7A1 0x000002E4
+#define MCDE_EXTSRC7A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC7A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC7A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC8A1 0x00000304
+#define MCDE_EXTSRC8A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC8A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC8A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC9A1 0x00000324
+#define MCDE_EXTSRC9A1_BASEADDRESS1_SHIFT 3
+#define MCDE_EXTSRC9A1_BASEADDRESS1_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC9A1_BASEADDRESS1(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9A1, BASEADDRESS1, __x)
+#define MCDE_EXTSRC6A2 0x000002C8
+#define MCDE_EXTSRC6A2_BASEADDRESS2_SHIFT 3
+#define MCDE_EXTSRC6A2_BASEADDRESS2_MASK 0xFFFFFFF8
+#define MCDE_EXTSRC6A2_BASEADDRESS2(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6A2, BASEADDRESS2, __x)
+#define MCDE_EXTSRC0CONF 0x0000020C
+#define MCDE_EXTSRC0CONF_GROUPOFFSET 0x20
+#define MCDE_EXTSRC0CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC0CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC0CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BUF_ID, __x)
+#define MCDE_EXTSRC0CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC0CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC0CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BUF_NB, __x)
+#define MCDE_EXTSRC0CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC0CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC0CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC0CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC0CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC0CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC0CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC0CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC0CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC0CONF_BPP_RGB444 4
+#define MCDE_EXTSRC0CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC0CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC0CONF_BPP_RGB565 7
+#define MCDE_EXTSRC0CONF_BPP_RGB888 8
+#define MCDE_EXTSRC0CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC0CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC0CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC0CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BPP, MCDE_EXTSRC0CONF_BPP_##__x)
+#define MCDE_EXTSRC0CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BPP, __x)
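+/*
+ * Illustrative: MCDE_EXTSRC0CONF_BPP_ENUM(RGB565) pastes the RGB565
+ * token, yielding MCDE_EXTSRC0CONF_BPP(7) = (7 << 8) & 0x00000F00.
+ */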
+#define MCDE_EXTSRC0CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC0CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC0CONF_BGR_RGB 0
+#define MCDE_EXTSRC0CONF_BGR_BGR 1
+#define MCDE_EXTSRC0CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BGR, MCDE_EXTSRC0CONF_BGR_##__x)
+#define MCDE_EXTSRC0CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BGR, __x)
+#define MCDE_EXTSRC0CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC0CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC0CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC0CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC0CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEBO, MCDE_EXTSRC0CONF_BEBO_##__x)
+#define MCDE_EXTSRC0CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEBO, __x)
+#define MCDE_EXTSRC0CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC0CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC0CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC0CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC0CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEPO, MCDE_EXTSRC0CONF_BEPO_##__x)
+#define MCDE_EXTSRC0CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEPO, __x)
+#define MCDE_EXTSRC1CONF 0x0000022C
+#define MCDE_EXTSRC1CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC1CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC1CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BUF_ID, __x)
+#define MCDE_EXTSRC1CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC1CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC1CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BUF_NB, __x)
+#define MCDE_EXTSRC1CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC1CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC1CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC1CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC1CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC1CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC1CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC1CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC1CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC1CONF_BPP_RGB444 4
+#define MCDE_EXTSRC1CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC1CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC1CONF_BPP_RGB565 7
+#define MCDE_EXTSRC1CONF_BPP_RGB888 8
+#define MCDE_EXTSRC1CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC1CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC1CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC1CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BPP, MCDE_EXTSRC1CONF_BPP_##__x)
+#define MCDE_EXTSRC1CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BPP, __x)
+#define MCDE_EXTSRC1CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC1CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC1CONF_BGR_RGB 0
+#define MCDE_EXTSRC1CONF_BGR_BGR 1
+#define MCDE_EXTSRC1CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BGR, MCDE_EXTSRC1CONF_BGR_##__x)
+#define MCDE_EXTSRC1CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BGR, __x)
+#define MCDE_EXTSRC1CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC1CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC1CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC1CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC1CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEBO, MCDE_EXTSRC1CONF_BEBO_##__x)
+#define MCDE_EXTSRC1CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEBO, __x)
+#define MCDE_EXTSRC1CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC1CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC1CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC1CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC1CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEPO, MCDE_EXTSRC1CONF_BEPO_##__x)
+#define MCDE_EXTSRC1CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEPO, __x)
+#define MCDE_EXTSRC2CONF 0x0000024C
+#define MCDE_EXTSRC2CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC2CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC2CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BUF_ID, __x)
+#define MCDE_EXTSRC2CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC2CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC2CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BUF_NB, __x)
+#define MCDE_EXTSRC2CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC2CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC2CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC2CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC2CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC2CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC2CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC2CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC2CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC2CONF_BPP_RGB444 4
+#define MCDE_EXTSRC2CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC2CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC2CONF_BPP_RGB565 7
+#define MCDE_EXTSRC2CONF_BPP_RGB888 8
+#define MCDE_EXTSRC2CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC2CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC2CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC2CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BPP, MCDE_EXTSRC2CONF_BPP_##__x)
+#define MCDE_EXTSRC2CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BPP, __x)
+#define MCDE_EXTSRC2CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC2CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC2CONF_BGR_RGB 0
+#define MCDE_EXTSRC2CONF_BGR_BGR 1
+#define MCDE_EXTSRC2CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BGR, MCDE_EXTSRC2CONF_BGR_##__x)
+#define MCDE_EXTSRC2CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BGR, __x)
+#define MCDE_EXTSRC2CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC2CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC2CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC2CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC2CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEBO, MCDE_EXTSRC2CONF_BEBO_##__x)
+#define MCDE_EXTSRC2CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEBO, __x)
+#define MCDE_EXTSRC2CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC2CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC2CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC2CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC2CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEPO, MCDE_EXTSRC2CONF_BEPO_##__x)
+#define MCDE_EXTSRC2CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEPO, __x)
+#define MCDE_EXTSRC3CONF 0x0000026C
+#define MCDE_EXTSRC3CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC3CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC3CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BUF_ID, __x)
+#define MCDE_EXTSRC3CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC3CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC3CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BUF_NB, __x)
+#define MCDE_EXTSRC3CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC3CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC3CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC3CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC3CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC3CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC3CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC3CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC3CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC3CONF_BPP_RGB444 4
+#define MCDE_EXTSRC3CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC3CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC3CONF_BPP_RGB565 7
+#define MCDE_EXTSRC3CONF_BPP_RGB888 8
+#define MCDE_EXTSRC3CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC3CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC3CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC3CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BPP, MCDE_EXTSRC3CONF_BPP_##__x)
+#define MCDE_EXTSRC3CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BPP, __x)
+#define MCDE_EXTSRC3CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC3CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC3CONF_BGR_RGB 0
+#define MCDE_EXTSRC3CONF_BGR_BGR 1
+#define MCDE_EXTSRC3CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BGR, MCDE_EXTSRC3CONF_BGR_##__x)
+#define MCDE_EXTSRC3CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BGR, __x)
+#define MCDE_EXTSRC3CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC3CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC3CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC3CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC3CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEBO, MCDE_EXTSRC3CONF_BEBO_##__x)
+#define MCDE_EXTSRC3CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEBO, __x)
+#define MCDE_EXTSRC3CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC3CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC3CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC3CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC3CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEPO, MCDE_EXTSRC3CONF_BEPO_##__x)
+#define MCDE_EXTSRC3CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEPO, __x)
+#define MCDE_EXTSRC4CONF 0x0000028C
+#define MCDE_EXTSRC4CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC4CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC4CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BUF_ID, __x)
+#define MCDE_EXTSRC4CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC4CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC4CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BUF_NB, __x)
+#define MCDE_EXTSRC4CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC4CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC4CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC4CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC4CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC4CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC4CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC4CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC4CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC4CONF_BPP_RGB444 4
+#define MCDE_EXTSRC4CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC4CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC4CONF_BPP_RGB565 7
+#define MCDE_EXTSRC4CONF_BPP_RGB888 8
+#define MCDE_EXTSRC4CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC4CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC4CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC4CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BPP, MCDE_EXTSRC4CONF_BPP_##__x)
+#define MCDE_EXTSRC4CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BPP, __x)
+#define MCDE_EXTSRC4CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC4CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC4CONF_BGR_RGB 0
+#define MCDE_EXTSRC4CONF_BGR_BGR 1
+#define MCDE_EXTSRC4CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BGR, MCDE_EXTSRC4CONF_BGR_##__x)
+#define MCDE_EXTSRC4CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BGR, __x)
+#define MCDE_EXTSRC4CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC4CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC4CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC4CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC4CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEBO, MCDE_EXTSRC4CONF_BEBO_##__x)
+#define MCDE_EXTSRC4CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEBO, __x)
+#define MCDE_EXTSRC4CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC4CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC4CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC4CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC4CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEPO, MCDE_EXTSRC4CONF_BEPO_##__x)
+#define MCDE_EXTSRC4CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEPO, __x)
+#define MCDE_EXTSRC5CONF 0x000002AC
+#define MCDE_EXTSRC5CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC5CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC5CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BUF_ID, __x)
+#define MCDE_EXTSRC5CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC5CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC5CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BUF_NB, __x)
+#define MCDE_EXTSRC5CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC5CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC5CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC5CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC5CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC5CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC5CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC5CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC5CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC5CONF_BPP_RGB444 4
+#define MCDE_EXTSRC5CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC5CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC5CONF_BPP_RGB565 7
+#define MCDE_EXTSRC5CONF_BPP_RGB888 8
+#define MCDE_EXTSRC5CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC5CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC5CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC5CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BPP, MCDE_EXTSRC5CONF_BPP_##__x)
+#define MCDE_EXTSRC5CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BPP, __x)
+#define MCDE_EXTSRC5CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC5CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC5CONF_BGR_RGB 0
+#define MCDE_EXTSRC5CONF_BGR_BGR 1
+#define MCDE_EXTSRC5CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BGR, MCDE_EXTSRC5CONF_BGR_##__x)
+#define MCDE_EXTSRC5CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BGR, __x)
+#define MCDE_EXTSRC5CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC5CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC5CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC5CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC5CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEBO, MCDE_EXTSRC5CONF_BEBO_##__x)
+#define MCDE_EXTSRC5CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEBO, __x)
+#define MCDE_EXTSRC5CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC5CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC5CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC5CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC5CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEPO, MCDE_EXTSRC5CONF_BEPO_##__x)
+#define MCDE_EXTSRC5CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEPO, __x)
+#define MCDE_EXTSRC6CONF 0x000002CC
+#define MCDE_EXTSRC6CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC6CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC6CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BUF_ID, __x)
+#define MCDE_EXTSRC6CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC6CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC6CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BUF_NB, __x)
+#define MCDE_EXTSRC6CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC6CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC6CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC6CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC6CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC6CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC6CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC6CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC6CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC6CONF_BPP_RGB444 4
+#define MCDE_EXTSRC6CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC6CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC6CONF_BPP_RGB565 7
+#define MCDE_EXTSRC6CONF_BPP_RGB888 8
+#define MCDE_EXTSRC6CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC6CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC6CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC6CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BPP, MCDE_EXTSRC6CONF_BPP_##__x)
+#define MCDE_EXTSRC6CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BPP, __x)
+#define MCDE_EXTSRC6CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC6CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC6CONF_BGR_RGB 0
+#define MCDE_EXTSRC6CONF_BGR_BGR 1
+#define MCDE_EXTSRC6CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BGR, MCDE_EXTSRC6CONF_BGR_##__x)
+#define MCDE_EXTSRC6CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BGR, __x)
+#define MCDE_EXTSRC6CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC6CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC6CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC6CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC6CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEBO, MCDE_EXTSRC6CONF_BEBO_##__x)
+#define MCDE_EXTSRC6CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEBO, __x)
+#define MCDE_EXTSRC6CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC6CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC6CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC6CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC6CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEPO, MCDE_EXTSRC6CONF_BEPO_##__x)
+#define MCDE_EXTSRC6CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEPO, __x)
+#define MCDE_EXTSRC7CONF 0x000002EC
+#define MCDE_EXTSRC7CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC7CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC7CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BUF_ID, __x)
+#define MCDE_EXTSRC7CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC7CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC7CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BUF_NB, __x)
+#define MCDE_EXTSRC7CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC7CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC7CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC7CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC7CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC7CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC7CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC7CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC7CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC7CONF_BPP_RGB444 4
+#define MCDE_EXTSRC7CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC7CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC7CONF_BPP_RGB565 7
+#define MCDE_EXTSRC7CONF_BPP_RGB888 8
+#define MCDE_EXTSRC7CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC7CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC7CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC7CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BPP, MCDE_EXTSRC7CONF_BPP_##__x)
+#define MCDE_EXTSRC7CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BPP, __x)
+#define MCDE_EXTSRC7CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC7CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC7CONF_BGR_RGB 0
+#define MCDE_EXTSRC7CONF_BGR_BGR 1
+#define MCDE_EXTSRC7CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BGR, MCDE_EXTSRC7CONF_BGR_##__x)
+#define MCDE_EXTSRC7CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BGR, __x)
+#define MCDE_EXTSRC7CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC7CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC7CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC7CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC7CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEBO, MCDE_EXTSRC7CONF_BEBO_##__x)
+#define MCDE_EXTSRC7CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEBO, __x)
+#define MCDE_EXTSRC7CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC7CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC7CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC7CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC7CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEPO, MCDE_EXTSRC7CONF_BEPO_##__x)
+#define MCDE_EXTSRC7CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEPO, __x)
+#define MCDE_EXTSRC8CONF 0x0000030C
+#define MCDE_EXTSRC8CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC8CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC8CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BUF_ID, __x)
+#define MCDE_EXTSRC8CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC8CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC8CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BUF_NB, __x)
+#define MCDE_EXTSRC8CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC8CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC8CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC8CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC8CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC8CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC8CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC8CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC8CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC8CONF_BPP_RGB444 4
+#define MCDE_EXTSRC8CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC8CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC8CONF_BPP_RGB565 7
+#define MCDE_EXTSRC8CONF_BPP_RGB888 8
+#define MCDE_EXTSRC8CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC8CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC8CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC8CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BPP, MCDE_EXTSRC8CONF_BPP_##__x)
+#define MCDE_EXTSRC8CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BPP, __x)
+#define MCDE_EXTSRC8CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC8CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC8CONF_BGR_RGB 0
+#define MCDE_EXTSRC8CONF_BGR_BGR 1
+#define MCDE_EXTSRC8CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BGR, MCDE_EXTSRC8CONF_BGR_##__x)
+#define MCDE_EXTSRC8CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BGR, __x)
+#define MCDE_EXTSRC8CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC8CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC8CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC8CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC8CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEBO, MCDE_EXTSRC8CONF_BEBO_##__x)
+#define MCDE_EXTSRC8CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEBO, __x)
+#define MCDE_EXTSRC8CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC8CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC8CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC8CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC8CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEPO, MCDE_EXTSRC8CONF_BEPO_##__x)
+#define MCDE_EXTSRC8CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEPO, __x)
+#define MCDE_EXTSRC9CONF 0x0000032C
+#define MCDE_EXTSRC9CONF_BUF_ID_SHIFT 0
+#define MCDE_EXTSRC9CONF_BUF_ID_MASK 0x00000003
+#define MCDE_EXTSRC9CONF_BUF_ID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BUF_ID, __x)
+#define MCDE_EXTSRC9CONF_BUF_NB_SHIFT 2
+#define MCDE_EXTSRC9CONF_BUF_NB_MASK 0x0000000C
+#define MCDE_EXTSRC9CONF_BUF_NB(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BUF_NB, __x)
+#define MCDE_EXTSRC9CONF_PRI_OVLID_SHIFT 4
+#define MCDE_EXTSRC9CONF_PRI_OVLID_MASK 0x000000F0
+#define MCDE_EXTSRC9CONF_PRI_OVLID(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, PRI_OVLID, __x)
+#define MCDE_EXTSRC9CONF_BPP_SHIFT 8
+#define MCDE_EXTSRC9CONF_BPP_MASK 0x00000F00
+#define MCDE_EXTSRC9CONF_BPP_1BPP_PAL 0
+#define MCDE_EXTSRC9CONF_BPP_2BPP_PAL 1
+#define MCDE_EXTSRC9CONF_BPP_4BPP_PAL 2
+#define MCDE_EXTSRC9CONF_BPP_8BPP_PAL 3
+#define MCDE_EXTSRC9CONF_BPP_RGB444 4
+#define MCDE_EXTSRC9CONF_BPP_ARGB4444 5
+#define MCDE_EXTSRC9CONF_BPP_IRGB1555 6
+#define MCDE_EXTSRC9CONF_BPP_RGB565 7
+#define MCDE_EXTSRC9CONF_BPP_RGB888 8
+#define MCDE_EXTSRC9CONF_BPP_XRGB8888 9
+#define MCDE_EXTSRC9CONF_BPP_ARGB8888 10
+#define MCDE_EXTSRC9CONF_BPP_YCBCR422 11
+#define MCDE_EXTSRC9CONF_BPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BPP, MCDE_EXTSRC9CONF_BPP_##__x)
+#define MCDE_EXTSRC9CONF_BPP(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BPP, __x)
+#define MCDE_EXTSRC9CONF_BGR_SHIFT 12
+#define MCDE_EXTSRC9CONF_BGR_MASK 0x00001000
+#define MCDE_EXTSRC9CONF_BGR_RGB 0
+#define MCDE_EXTSRC9CONF_BGR_BGR 1
+#define MCDE_EXTSRC9CONF_BGR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BGR, MCDE_EXTSRC9CONF_BGR_##__x)
+#define MCDE_EXTSRC9CONF_BGR(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BGR, __x)
+#define MCDE_EXTSRC9CONF_BEBO_SHIFT 13
+#define MCDE_EXTSRC9CONF_BEBO_MASK 0x00002000
+#define MCDE_EXTSRC9CONF_BEBO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC9CONF_BEBO_BIG_ENDIAN 1
+#define MCDE_EXTSRC9CONF_BEBO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEBO, MCDE_EXTSRC9CONF_BEBO_##__x)
+#define MCDE_EXTSRC9CONF_BEBO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEBO, __x)
+#define MCDE_EXTSRC9CONF_BEPO_SHIFT 14
+#define MCDE_EXTSRC9CONF_BEPO_MASK 0x00004000
+#define MCDE_EXTSRC9CONF_BEPO_LITTLE_ENDIAN 0
+#define MCDE_EXTSRC9CONF_BEPO_BIG_ENDIAN 1
+#define MCDE_EXTSRC9CONF_BEPO_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEPO, MCDE_EXTSRC9CONF_BEPO_##__x)
+#define MCDE_EXTSRC9CONF_BEPO(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEPO, __x)
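+/*
+ * External source control registers, EXTSRC0CR..EXTSRC9CR, stride
+ * MCDE_EXTSRC0CR_GROUPOFFSET (0x20).  Throughout this header each
+ * field has a raw setter, FIELD(__x), and an _ENUM variant that
+ * token-pastes a named value, e.g. MCDE_EXTSRC0CR_SEL_MOD_ENUM(
+ * AUTO_TOGGLE); both expand through the MCDE_VAL2REG helper defined
+ * earlier in this file.
+ */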
+#define MCDE_EXTSRC0CR 0x00000210
+#define MCDE_EXTSRC0CR_GROUPOFFSET 0x20
+#define MCDE_EXTSRC0CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC0CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC0CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC0CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC0CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, SEL_MOD, MCDE_EXTSRC0CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC0CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, SEL_MOD, __x)
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC0CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC0CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC0CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC0CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC0CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC0CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC0CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC0CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC0CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC1CR 0x00000230
+#define MCDE_EXTSRC1CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC1CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC1CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC1CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC1CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC1CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, SEL_MOD, MCDE_EXTSRC1CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC1CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, SEL_MOD, __x)
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC1CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC1CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC1CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC1CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC1CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC1CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC1CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC1CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC1CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC2CR 0x00000250
+#define MCDE_EXTSRC2CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC2CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC2CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC2CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC2CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC2CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, SEL_MOD, MCDE_EXTSRC2CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC2CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, SEL_MOD, __x)
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC2CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC2CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC2CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC2CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC2CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC2CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC2CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC2CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC2CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC3CR 0x00000270
+#define MCDE_EXTSRC3CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC3CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC3CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC3CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC3CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC3CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, SEL_MOD, MCDE_EXTSRC3CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC3CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, SEL_MOD, __x)
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC3CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC3CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC3CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC3CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC3CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC3CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC3CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC3CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC4CR 0x00000290
+#define MCDE_EXTSRC4CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC4CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC4CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC4CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC4CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC4CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, SEL_MOD, MCDE_EXTSRC4CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC4CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, SEL_MOD, __x)
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC4CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC4CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC4CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC4CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC4CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC4CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC4CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC4CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC4CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC5CR 0x000002B0
+#define MCDE_EXTSRC5CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC5CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC5CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC5CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC5CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC5CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, SEL_MOD, MCDE_EXTSRC5CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC5CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, SEL_MOD, __x)
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC5CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC5CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC5CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC5CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC5CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC5CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC5CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC5CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC5CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC6CR 0x000002D0
+#define MCDE_EXTSRC6CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC6CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC6CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC6CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC6CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC6CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, SEL_MOD, MCDE_EXTSRC6CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC6CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, SEL_MOD, __x)
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC6CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC6CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC6CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC6CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC6CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC6CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC6CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC6CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC6CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC7CR 0x000002F0
+#define MCDE_EXTSRC7CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC7CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC7CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC7CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC7CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC7CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, SEL_MOD, MCDE_EXTSRC7CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC7CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, SEL_MOD, __x)
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC7CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC7CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC7CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC7CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC7CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC7CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC7CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC7CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC7CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC8CR 0x00000310
+#define MCDE_EXTSRC8CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC8CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC8CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC8CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC8CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC8CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, SEL_MOD, MCDE_EXTSRC8CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC8CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, SEL_MOD, __x)
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC8CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC8CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC8CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC8CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC8CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC8CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC8CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC8CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC8CR, FORCE_FS_DIV, __x)
+#define MCDE_EXTSRC9CR 0x00000330
+#define MCDE_EXTSRC9CR_SEL_MOD_SHIFT 0
+#define MCDE_EXTSRC9CR_SEL_MOD_MASK 0x00000003
+#define MCDE_EXTSRC9CR_SEL_MOD_EXTERNAL_SEL 0
+#define MCDE_EXTSRC9CR_SEL_MOD_AUTO_TOGGLE 1
+#define MCDE_EXTSRC9CR_SEL_MOD_SOFTWARE_SEL 2
+#define MCDE_EXTSRC9CR_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, SEL_MOD, MCDE_EXTSRC9CR_SEL_MOD_##__x)
+#define MCDE_EXTSRC9CR_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, SEL_MOD, __x)
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_SHIFT 2
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_MASK 0x00000004
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_ALL 0
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_PRIMARY 1
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, MULTIOVL_CTRL, \
+ MCDE_EXTSRC9CR_MULTIOVL_CTRL_##__x)
+#define MCDE_EXTSRC9CR_MULTIOVL_CTRL(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, MULTIOVL_CTRL, __x)
+#define MCDE_EXTSRC9CR_FS_DIV_DISABLE_SHIFT 3
+#define MCDE_EXTSRC9CR_FS_DIV_DISABLE_MASK 0x00000008
+#define MCDE_EXTSRC9CR_FS_DIV_DISABLE(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, FS_DIV_DISABLE, __x)
+#define MCDE_EXTSRC9CR_FORCE_FS_DIV_SHIFT 4
+#define MCDE_EXTSRC9CR_FORCE_FS_DIV_MASK 0x00000010
+#define MCDE_EXTSRC9CR_FORCE_FS_DIV(__x) \
+ MCDE_VAL2REG(MCDE_EXTSRC9CR, FORCE_FS_DIV, __x)
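+/*
+ * Overlay control registers, OVL0CR..OVL5CR, stride
+ * MCDE_OVL0CR_GROUPOFFSET (0x20).  A typical write composes several
+ * fields into one word; a minimal sketch (illustrative values only):
+ *
+ *   u32 cr = MCDE_OVL0CR_OVLEN(1) |
+ *            MCDE_OVL0CR_COLCCTRL_ENUM(DISABLED) |
+ *            MCDE_OVL0CR_BURSTSIZE_ENUM(HW_8W);
+ */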
+#define MCDE_OVL0CR 0x00000400
+#define MCDE_OVL0CR_GROUPOFFSET 0x20
+#define MCDE_OVL0CR_OVLEN_SHIFT 0
+#define MCDE_OVL0CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL0CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLEN, __x)
+#define MCDE_OVL0CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL0CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL0CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL0CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL0CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, COLCCTRL, MCDE_OVL0CR_COLCCTRL_##__x)
+#define MCDE_OVL0CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, COLCCTRL, __x)
+#define MCDE_OVL0CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL0CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL0CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, CKEYGEN, __x)
+#define MCDE_OVL0CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL0CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL0CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, ALPHAPMEN, __x)
+#define MCDE_OVL0CR_OVLF_SHIFT 5
+#define MCDE_OVL0CR_OVLF_MASK 0x00000020
+#define MCDE_OVL0CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLF, __x)
+#define MCDE_OVL0CR_OVLR_SHIFT 6
+#define MCDE_OVL0CR_OVLR_MASK 0x00000040
+#define MCDE_OVL0CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLR, __x)
+#define MCDE_OVL0CR_OVLB_SHIFT 7
+#define MCDE_OVL0CR_OVLB_MASK 0x00000080
+#define MCDE_OVL0CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, OVLB, __x)
+#define MCDE_OVL0CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL0CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL0CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, FETCH_ROPC, __x)
+#define MCDE_OVL0CR_STBPRIO_SHIFT 16
+#define MCDE_OVL0CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL0CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, STBPRIO, __x)
+#define MCDE_OVL0CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL0CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL0CR_BURSTSIZE_1W 0
+#define MCDE_OVL0CR_BURSTSIZE_2W 1
+#define MCDE_OVL0CR_BURSTSIZE_4W 2
+#define MCDE_OVL0CR_BURSTSIZE_8W 3
+#define MCDE_OVL0CR_BURSTSIZE_16W 4
+#define MCDE_OVL0CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL0CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL0CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL0CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL0CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL0CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, BURSTSIZE, MCDE_OVL0CR_BURSTSIZE_##__x)
+#define MCDE_OVL0CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, BURSTSIZE, __x)
+#define MCDE_OVL0CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL0CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL0CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL0CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL0CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL0CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL0CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL0CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, MAXOUTSTANDING, \
+ MCDE_OVL0CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL0CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL0CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL0CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL0CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL0CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL0CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL0CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL0CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL0CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL0CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, ROTBURSTSIZE, MCDE_OVL0CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL0CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL1CR 0x00000420
+#define MCDE_OVL1CR_OVLEN_SHIFT 0
+#define MCDE_OVL1CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL1CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLEN, __x)
+#define MCDE_OVL1CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL1CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL1CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL1CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL1CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL1CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, COLCCTRL, MCDE_OVL1CR_COLCCTRL_##__x)
+#define MCDE_OVL1CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, COLCCTRL, __x)
+#define MCDE_OVL1CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL1CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL1CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, CKEYGEN, __x)
+#define MCDE_OVL1CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL1CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL1CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, ALPHAPMEN, __x)
+#define MCDE_OVL1CR_OVLF_SHIFT 5
+#define MCDE_OVL1CR_OVLF_MASK 0x00000020
+#define MCDE_OVL1CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLF, __x)
+#define MCDE_OVL1CR_OVLR_SHIFT 6
+#define MCDE_OVL1CR_OVLR_MASK 0x00000040
+#define MCDE_OVL1CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLR, __x)
+#define MCDE_OVL1CR_OVLB_SHIFT 7
+#define MCDE_OVL1CR_OVLB_MASK 0x00000080
+#define MCDE_OVL1CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, OVLB, __x)
+#define MCDE_OVL1CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL1CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL1CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, FETCH_ROPC, __x)
+#define MCDE_OVL1CR_STBPRIO_SHIFT 16
+#define MCDE_OVL1CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL1CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, STBPRIO, __x)
+#define MCDE_OVL1CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL1CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL1CR_BURSTSIZE_1W 0
+#define MCDE_OVL1CR_BURSTSIZE_2W 1
+#define MCDE_OVL1CR_BURSTSIZE_4W 2
+#define MCDE_OVL1CR_BURSTSIZE_8W 3
+#define MCDE_OVL1CR_BURSTSIZE_16W 4
+#define MCDE_OVL1CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL1CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL1CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL1CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL1CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL1CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, BURSTSIZE, MCDE_OVL1CR_BURSTSIZE_##__x)
+#define MCDE_OVL1CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, BURSTSIZE, __x)
+#define MCDE_OVL1CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL1CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL1CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL1CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL1CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL1CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL1CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL1CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, MAXOUTSTANDING, \
+ MCDE_OVL1CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL1CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL1CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL1CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL1CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL1CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL1CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL1CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL1CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL1CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL1CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, ROTBURSTSIZE, MCDE_OVL1CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL1CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL2CR 0x00000440
+#define MCDE_OVL2CR_OVLEN_SHIFT 0
+#define MCDE_OVL2CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL2CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLEN, __x)
+#define MCDE_OVL2CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL2CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL2CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL2CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL2CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL2CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, COLCCTRL, MCDE_OVL2CR_COLCCTRL_##__x)
+#define MCDE_OVL2CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, COLCCTRL, __x)
+#define MCDE_OVL2CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL2CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL2CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, CKEYGEN, __x)
+#define MCDE_OVL2CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL2CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL2CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, ALPHAPMEN, __x)
+#define MCDE_OVL2CR_OVLF_SHIFT 5
+#define MCDE_OVL2CR_OVLF_MASK 0x00000020
+#define MCDE_OVL2CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLF, __x)
+#define MCDE_OVL2CR_OVLR_SHIFT 6
+#define MCDE_OVL2CR_OVLR_MASK 0x00000040
+#define MCDE_OVL2CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLR, __x)
+#define MCDE_OVL2CR_OVLB_SHIFT 7
+#define MCDE_OVL2CR_OVLB_MASK 0x00000080
+#define MCDE_OVL2CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, OVLB, __x)
+#define MCDE_OVL2CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL2CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL2CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, FETCH_ROPC, __x)
+#define MCDE_OVL2CR_STBPRIO_SHIFT 16
+#define MCDE_OVL2CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL2CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, STBPRIO, __x)
+#define MCDE_OVL2CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL2CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL2CR_BURSTSIZE_1W 0
+#define MCDE_OVL2CR_BURSTSIZE_2W 1
+#define MCDE_OVL2CR_BURSTSIZE_4W 2
+#define MCDE_OVL2CR_BURSTSIZE_8W 3
+#define MCDE_OVL2CR_BURSTSIZE_16W 4
+#define MCDE_OVL2CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL2CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL2CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL2CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL2CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL2CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, BURSTSIZE, MCDE_OVL2CR_BURSTSIZE_##__x)
+#define MCDE_OVL2CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, BURSTSIZE, __x)
+#define MCDE_OVL2CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL2CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL2CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL2CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL2CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL2CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL2CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL2CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, MAXOUTSTANDING, \
+ MCDE_OVL2CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL2CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL2CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL2CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL2CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL2CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL2CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL2CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL2CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL2CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL2CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, ROTBURSTSIZE, MCDE_OVL2CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL2CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL3CR 0x00000460
+#define MCDE_OVL3CR_OVLEN_SHIFT 0
+#define MCDE_OVL3CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL3CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLEN, __x)
+#define MCDE_OVL3CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL3CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL3CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL3CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL3CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL3CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, COLCCTRL, MCDE_OVL3CR_COLCCTRL_##__x)
+#define MCDE_OVL3CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, COLCCTRL, __x)
+#define MCDE_OVL3CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL3CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL3CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, CKEYGEN, __x)
+#define MCDE_OVL3CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL3CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL3CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, ALPHAPMEN, __x)
+#define MCDE_OVL3CR_OVLF_SHIFT 5
+#define MCDE_OVL3CR_OVLF_MASK 0x00000020
+#define MCDE_OVL3CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLF, __x)
+#define MCDE_OVL3CR_OVLR_SHIFT 6
+#define MCDE_OVL3CR_OVLR_MASK 0x00000040
+#define MCDE_OVL3CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLR, __x)
+#define MCDE_OVL3CR_OVLB_SHIFT 7
+#define MCDE_OVL3CR_OVLB_MASK 0x00000080
+#define MCDE_OVL3CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, OVLB, __x)
+#define MCDE_OVL3CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL3CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL3CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, FETCH_ROPC, __x)
+#define MCDE_OVL3CR_STBPRIO_SHIFT 16
+#define MCDE_OVL3CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL3CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, STBPRIO, __x)
+#define MCDE_OVL3CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL3CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL3CR_BURSTSIZE_1W 0
+#define MCDE_OVL3CR_BURSTSIZE_2W 1
+#define MCDE_OVL3CR_BURSTSIZE_4W 2
+#define MCDE_OVL3CR_BURSTSIZE_8W 3
+#define MCDE_OVL3CR_BURSTSIZE_16W 4
+#define MCDE_OVL3CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL3CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL3CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL3CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL3CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL3CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, BURSTSIZE, MCDE_OVL3CR_BURSTSIZE_##__x)
+#define MCDE_OVL3CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, BURSTSIZE, __x)
+#define MCDE_OVL3CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL3CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL3CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL3CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL3CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL3CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL3CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL3CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, MAXOUTSTANDING, \
+ MCDE_OVL3CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL3CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL3CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL3CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL3CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL3CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL3CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL3CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL3CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL3CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL3CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, ROTBURSTSIZE, MCDE_OVL3CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL3CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL4CR 0x00000480
+#define MCDE_OVL4CR_OVLEN_SHIFT 0
+#define MCDE_OVL4CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL4CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLEN, __x)
+#define MCDE_OVL4CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL4CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL4CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL4CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL4CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL4CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, COLCCTRL, MCDE_OVL4CR_COLCCTRL_##__x)
+#define MCDE_OVL4CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, COLCCTRL, __x)
+#define MCDE_OVL4CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL4CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL4CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, CKEYGEN, __x)
+#define MCDE_OVL4CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL4CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL4CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, ALPHAPMEN, __x)
+#define MCDE_OVL4CR_OVLF_SHIFT 5
+#define MCDE_OVL4CR_OVLF_MASK 0x00000020
+#define MCDE_OVL4CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLF, __x)
+#define MCDE_OVL4CR_OVLR_SHIFT 6
+#define MCDE_OVL4CR_OVLR_MASK 0x00000040
+#define MCDE_OVL4CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLR, __x)
+#define MCDE_OVL4CR_OVLB_SHIFT 7
+#define MCDE_OVL4CR_OVLB_MASK 0x00000080
+#define MCDE_OVL4CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, OVLB, __x)
+#define MCDE_OVL4CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL4CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL4CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, FETCH_ROPC, __x)
+#define MCDE_OVL4CR_STBPRIO_SHIFT 16
+#define MCDE_OVL4CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL4CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, STBPRIO, __x)
+#define MCDE_OVL4CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL4CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL4CR_BURSTSIZE_1W 0
+#define MCDE_OVL4CR_BURSTSIZE_2W 1
+#define MCDE_OVL4CR_BURSTSIZE_4W 2
+#define MCDE_OVL4CR_BURSTSIZE_8W 3
+#define MCDE_OVL4CR_BURSTSIZE_16W 4
+#define MCDE_OVL4CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL4CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL4CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL4CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL4CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL4CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, BURSTSIZE, MCDE_OVL4CR_BURSTSIZE_##__x)
+#define MCDE_OVL4CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, BURSTSIZE, __x)
+#define MCDE_OVL4CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL4CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL4CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL4CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL4CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL4CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL4CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL4CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, MAXOUTSTANDING, \
+ MCDE_OVL4CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL4CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL4CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL4CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL4CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL4CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL4CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL4CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL4CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL4CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL4CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, ROTBURSTSIZE, MCDE_OVL4CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL4CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CR, ROTBURSTSIZE, __x)
+#define MCDE_OVL5CR 0x000004A0
+#define MCDE_OVL5CR_OVLEN_SHIFT 0
+#define MCDE_OVL5CR_OVLEN_MASK 0x00000001
+#define MCDE_OVL5CR_OVLEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLEN, __x)
+#define MCDE_OVL5CR_COLCCTRL_SHIFT 1
+#define MCDE_OVL5CR_COLCCTRL_MASK 0x00000006
+#define MCDE_OVL5CR_COLCCTRL_DISABLED 0
+#define MCDE_OVL5CR_COLCCTRL_ENABLED_NO_SAT 1
+#define MCDE_OVL5CR_COLCCTRL_ENABLED_SAT 2
+#define MCDE_OVL5CR_COLCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, COLCCTRL, MCDE_OVL5CR_COLCCTRL_##__x)
+#define MCDE_OVL5CR_COLCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, COLCCTRL, __x)
+#define MCDE_OVL5CR_CKEYGEN_SHIFT 3
+#define MCDE_OVL5CR_CKEYGEN_MASK 0x00000008
+#define MCDE_OVL5CR_CKEYGEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, CKEYGEN, __x)
+#define MCDE_OVL5CR_ALPHAPMEN_SHIFT 4
+#define MCDE_OVL5CR_ALPHAPMEN_MASK 0x00000010
+#define MCDE_OVL5CR_ALPHAPMEN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, ALPHAPMEN, __x)
+#define MCDE_OVL5CR_OVLF_SHIFT 5
+#define MCDE_OVL5CR_OVLF_MASK 0x00000020
+#define MCDE_OVL5CR_OVLF(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLF, __x)
+#define MCDE_OVL5CR_OVLR_SHIFT 6
+#define MCDE_OVL5CR_OVLR_MASK 0x00000040
+#define MCDE_OVL5CR_OVLR(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLR, __x)
+#define MCDE_OVL5CR_OVLB_SHIFT 7
+#define MCDE_OVL5CR_OVLB_MASK 0x00000080
+#define MCDE_OVL5CR_OVLB(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, OVLB, __x)
+#define MCDE_OVL5CR_FETCH_ROPC_SHIFT 8
+#define MCDE_OVL5CR_FETCH_ROPC_MASK 0x0000FF00
+#define MCDE_OVL5CR_FETCH_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, FETCH_ROPC, __x)
+#define MCDE_OVL5CR_STBPRIO_SHIFT 16
+#define MCDE_OVL5CR_STBPRIO_MASK 0x000F0000
+#define MCDE_OVL5CR_STBPRIO(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, STBPRIO, __x)
+#define MCDE_OVL5CR_BURSTSIZE_SHIFT 20
+#define MCDE_OVL5CR_BURSTSIZE_MASK 0x00F00000
+#define MCDE_OVL5CR_BURSTSIZE_1W 0
+#define MCDE_OVL5CR_BURSTSIZE_2W 1
+#define MCDE_OVL5CR_BURSTSIZE_4W 2
+#define MCDE_OVL5CR_BURSTSIZE_8W 3
+#define MCDE_OVL5CR_BURSTSIZE_16W 4
+#define MCDE_OVL5CR_BURSTSIZE_HW_1W 8
+#define MCDE_OVL5CR_BURSTSIZE_HW_2W 9
+#define MCDE_OVL5CR_BURSTSIZE_HW_4W 10
+#define MCDE_OVL5CR_BURSTSIZE_HW_8W 11
+#define MCDE_OVL5CR_BURSTSIZE_HW_16W 12
+#define MCDE_OVL5CR_BURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, BURSTSIZE, MCDE_OVL5CR_BURSTSIZE_##__x)
+#define MCDE_OVL5CR_BURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, BURSTSIZE, __x)
+#define MCDE_OVL5CR_MAXOUTSTANDING_SHIFT 24
+#define MCDE_OVL5CR_MAXOUTSTANDING_MASK 0x0F000000
+#define MCDE_OVL5CR_MAXOUTSTANDING_1_REQ 0
+#define MCDE_OVL5CR_MAXOUTSTANDING_2_REQ 1
+#define MCDE_OVL5CR_MAXOUTSTANDING_4_REQ 2
+#define MCDE_OVL5CR_MAXOUTSTANDING_8_REQ 3
+#define MCDE_OVL5CR_MAXOUTSTANDING_16_REQ 4
+#define MCDE_OVL5CR_MAXOUTSTANDING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, MAXOUTSTANDING, \
+ MCDE_OVL5CR_MAXOUTSTANDING_##__x)
+#define MCDE_OVL5CR_MAXOUTSTANDING(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, MAXOUTSTANDING, __x)
+#define MCDE_OVL5CR_ROTBURSTSIZE_SHIFT 28
+#define MCDE_OVL5CR_ROTBURSTSIZE_MASK 0xF0000000
+#define MCDE_OVL5CR_ROTBURSTSIZE_1W 0
+#define MCDE_OVL5CR_ROTBURSTSIZE_2W 1
+#define MCDE_OVL5CR_ROTBURSTSIZE_4W 2
+#define MCDE_OVL5CR_ROTBURSTSIZE_8W 3
+#define MCDE_OVL5CR_ROTBURSTSIZE_16W 4
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_1W 8
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_2W 9
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_4W 10
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_8W 11
+#define MCDE_OVL5CR_ROTBURSTSIZE_HW_16W 12
+#define MCDE_OVL5CR_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, ROTBURSTSIZE, MCDE_OVL5CR_ROTBURSTSIZE_##__x)
+#define MCDE_OVL5CR_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CR, ROTBURSTSIZE, __x)
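+/*
+ * Overlay configuration registers, OVL0CONF..OVL5CONF: frame size as
+ * pixels per line (PPL) and lines per frame (LPF), plus the external
+ * source feeding the overlay (EXTSRC_ID).
+ */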
+#define MCDE_OVL0CONF 0x00000404
+#define MCDE_OVL0CONF_GROUPOFFSET 0x20
+#define MCDE_OVL0CONF_PPL_SHIFT 0
+#define MCDE_OVL0CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL0CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF, PPL, __x)
+#define MCDE_OVL0CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL0CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL0CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF, EXTSRC_ID, __x)
+#define MCDE_OVL0CONF_LPF_SHIFT 16
+#define MCDE_OVL0CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL0CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF, LPF, __x)
+#define MCDE_OVL1CONF 0x00000424
+#define MCDE_OVL1CONF_PPL_SHIFT 0
+#define MCDE_OVL1CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL1CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF, PPL, __x)
+#define MCDE_OVL1CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL1CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL1CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF, EXTSRC_ID, __x)
+#define MCDE_OVL1CONF_LPF_SHIFT 16
+#define MCDE_OVL1CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL1CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF, LPF, __x)
+#define MCDE_OVL2CONF 0x00000444
+#define MCDE_OVL2CONF_PPL_SHIFT 0
+#define MCDE_OVL2CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL2CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF, PPL, __x)
+#define MCDE_OVL2CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL2CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL2CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF, EXTSRC_ID, __x)
+#define MCDE_OVL2CONF_LPF_SHIFT 16
+#define MCDE_OVL2CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL2CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF, LPF, __x)
+#define MCDE_OVL3CONF 0x00000464
+#define MCDE_OVL3CONF_PPL_SHIFT 0
+#define MCDE_OVL3CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL3CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF, PPL, __x)
+#define MCDE_OVL3CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL3CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL3CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF, EXTSRC_ID, __x)
+#define MCDE_OVL3CONF_LPF_SHIFT 16
+#define MCDE_OVL3CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL3CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF, LPF, __x)
+#define MCDE_OVL4CONF 0x00000484
+#define MCDE_OVL4CONF_PPL_SHIFT 0
+#define MCDE_OVL4CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL4CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF, PPL, __x)
+#define MCDE_OVL4CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL4CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL4CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF, EXTSRC_ID, __x)
+#define MCDE_OVL4CONF_LPF_SHIFT 16
+#define MCDE_OVL4CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL4CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF, LPF, __x)
+#define MCDE_OVL5CONF 0x000004A4
+#define MCDE_OVL5CONF_PPL_SHIFT 0
+#define MCDE_OVL5CONF_PPL_MASK 0x000007FF
+#define MCDE_OVL5CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF, PPL, __x)
+#define MCDE_OVL5CONF_EXTSRC_ID_SHIFT 11
+#define MCDE_OVL5CONF_EXTSRC_ID_MASK 0x00007800
+#define MCDE_OVL5CONF_EXTSRC_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF, EXTSRC_ID, __x)
+#define MCDE_OVL5CONF_LPF_SHIFT 16
+#define MCDE_OVL5CONF_LPF_MASK 0x07FF0000
+#define MCDE_OVL5CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF, LPF, __x)
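+/*
+ * Overlay blend configuration, OVL0CONF2..OVL5CONF2: per-pixel vs
+ * constant alpha select (BP), constant alpha value, opaque flag (OPQ),
+ * pixel offset (PIXOFF) and pixel fetcher watermark level.
+ */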
+#define MCDE_OVL0CONF2 0x00000408
+#define MCDE_OVL0CONF2_GROUPOFFSET 0x20
+#define MCDE_OVL0CONF2_BP_SHIFT 0
+#define MCDE_OVL0CONF2_BP_MASK 0x00000001
+#define MCDE_OVL0CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL0CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL0CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, BP, MCDE_OVL0CONF2_BP_##__x)
+#define MCDE_OVL0CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, BP, __x)
+#define MCDE_OVL0CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL0CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL0CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL0CONF2_OPQ_SHIFT 9
+#define MCDE_OVL0CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL0CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, OPQ, __x)
+#define MCDE_OVL0CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL0CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL0CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, PIXOFF, __x)
+#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL1CONF2 0x00000428
+#define MCDE_OVL1CONF2_BP_SHIFT 0
+#define MCDE_OVL1CONF2_BP_MASK 0x00000001
+#define MCDE_OVL1CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL1CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL1CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, BP, MCDE_OVL1CONF2_BP_##__x)
+#define MCDE_OVL1CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, BP, __x)
+#define MCDE_OVL1CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL1CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL1CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL1CONF2_OPQ_SHIFT 9
+#define MCDE_OVL1CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL1CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, OPQ, __x)
+#define MCDE_OVL1CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL1CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL1CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, PIXOFF, __x)
+#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL2CONF2 0x00000448
+#define MCDE_OVL2CONF2_BP_SHIFT 0
+#define MCDE_OVL2CONF2_BP_MASK 0x00000001
+#define MCDE_OVL2CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL2CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL2CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, BP, MCDE_OVL2CONF2_BP_##__x)
+#define MCDE_OVL2CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, BP, __x)
+#define MCDE_OVL2CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL2CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL2CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL2CONF2_OPQ_SHIFT 9
+#define MCDE_OVL2CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL2CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, OPQ, __x)
+#define MCDE_OVL2CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL2CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL2CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, PIXOFF, __x)
+#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL3CONF2 0x00000468
+#define MCDE_OVL3CONF2_BP_SHIFT 0
+#define MCDE_OVL3CONF2_BP_MASK 0x00000001
+#define MCDE_OVL3CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL3CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL3CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, BP, MCDE_OVL3CONF2_BP_##__x)
+#define MCDE_OVL3CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, BP, __x)
+#define MCDE_OVL3CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL3CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL3CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL3CONF2_OPQ_SHIFT 9
+#define MCDE_OVL3CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL3CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, OPQ, __x)
+#define MCDE_OVL3CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL3CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL3CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, PIXOFF, __x)
+#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL4CONF2 0x00000488
+#define MCDE_OVL4CONF2_BP_SHIFT 0
+#define MCDE_OVL4CONF2_BP_MASK 0x00000001
+#define MCDE_OVL4CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL4CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL4CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, BP, MCDE_OVL4CONF2_BP_##__x)
+#define MCDE_OVL4CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, BP, __x)
+#define MCDE_OVL4CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL4CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL4CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL4CONF2_OPQ_SHIFT 9
+#define MCDE_OVL4CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL4CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, OPQ, __x)
+#define MCDE_OVL4CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL4CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL4CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, PIXOFF, __x)
+#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
+#define MCDE_OVL5CONF2 0x000004A8
+#define MCDE_OVL5CONF2_BP_SHIFT 0
+#define MCDE_OVL5CONF2_BP_MASK 0x00000001
+#define MCDE_OVL5CONF2_BP_PER_PIXEL_ALPHA 0
+#define MCDE_OVL5CONF2_BP_CONSTANT_ALPHA 1
+#define MCDE_OVL5CONF2_BP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, BP, MCDE_OVL5CONF2_BP_##__x)
+#define MCDE_OVL5CONF2_BP(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, BP, __x)
+#define MCDE_OVL5CONF2_ALPHAVALUE_SHIFT 1
+#define MCDE_OVL5CONF2_ALPHAVALUE_MASK 0x000001FE
+#define MCDE_OVL5CONF2_ALPHAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, ALPHAVALUE, __x)
+#define MCDE_OVL5CONF2_OPQ_SHIFT 9
+#define MCDE_OVL5CONF2_OPQ_MASK 0x00000200
+#define MCDE_OVL5CONF2_OPQ(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, OPQ, __x)
+#define MCDE_OVL5CONF2_PIXOFF_SHIFT 10
+#define MCDE_OVL5CONF2_PIXOFF_MASK 0x0000FC00
+#define MCDE_OVL5CONF2_PIXOFF(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, PIXOFF, __x)
+#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16
+#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000
+#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CONF2, PIXELFETCHERWATERMARKLEVEL, __x)
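+/*
+ * Overlay line jump increment registers, OVL0LJINC..OVL5LJINC: a full
+ * 32-bit per-line increment for stepping through the overlay buffer.
+ */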
+#define MCDE_OVL0LJINC 0x0000040C
+#define MCDE_OVL0LJINC_GROUPOFFSET 0x20
+#define MCDE_OVL0LJINC_LJINC_SHIFT 0
+#define MCDE_OVL0LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL0LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL0LJINC, LJINC, __x)
+#define MCDE_OVL1LJINC 0x0000042C
+#define MCDE_OVL1LJINC_LJINC_SHIFT 0
+#define MCDE_OVL1LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL1LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL1LJINC, LJINC, __x)
+#define MCDE_OVL2LJINC 0x0000044C
+#define MCDE_OVL2LJINC_LJINC_SHIFT 0
+#define MCDE_OVL2LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL2LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL2LJINC, LJINC, __x)
+#define MCDE_OVL3LJINC 0x0000046C
+#define MCDE_OVL3LJINC_LJINC_SHIFT 0
+#define MCDE_OVL3LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL3LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL3LJINC, LJINC, __x)
+#define MCDE_OVL4LJINC 0x0000048C
+#define MCDE_OVL4LJINC_LJINC_SHIFT 0
+#define MCDE_OVL4LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL4LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL4LJINC, LJINC, __x)
+#define MCDE_OVL5LJINC 0x000004AC
+#define MCDE_OVL5LJINC_LJINC_SHIFT 0
+#define MCDE_OVL5LJINC_LJINC_MASK 0xFFFFFFFF
+#define MCDE_OVL5LJINC_LJINC(__x) \
+ MCDE_VAL2REG(MCDE_OVL5LJINC, LJINC, __x)
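+/*
+ * Overlay crop registers, OVL0CROP..OVL5CROP: top margin (TMRGN,
+ * bits 0..21) and left margin (LMRGN, bits 22..31) of the source crop.
+ */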
+#define MCDE_OVL0CROP 0x00000410
+#define MCDE_OVL0CROP_GROUPOFFSET 0x20
+#define MCDE_OVL0CROP_TMRGN_SHIFT 0
+#define MCDE_OVL0CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL0CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CROP, TMRGN, __x)
+#define MCDE_OVL0CROP_LMRGN_SHIFT 22
+#define MCDE_OVL0CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL0CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL0CROP, LMRGN, __x)
+#define MCDE_OVL1CROP 0x00000430
+#define MCDE_OVL1CROP_TMRGN_SHIFT 0
+#define MCDE_OVL1CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL1CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CROP, TMRGN, __x)
+#define MCDE_OVL1CROP_LMRGN_SHIFT 22
+#define MCDE_OVL1CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL1CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL1CROP, LMRGN, __x)
+#define MCDE_OVL2CROP 0x00000450
+#define MCDE_OVL2CROP_TMRGN_SHIFT 0
+#define MCDE_OVL2CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL2CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CROP, TMRGN, __x)
+#define MCDE_OVL2CROP_LMRGN_SHIFT 22
+#define MCDE_OVL2CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL2CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL2CROP, LMRGN, __x)
+#define MCDE_OVL3CROP 0x00000470
+#define MCDE_OVL3CROP_TMRGN_SHIFT 0
+#define MCDE_OVL3CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL3CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CROP, TMRGN, __x)
+#define MCDE_OVL3CROP_LMRGN_SHIFT 22
+#define MCDE_OVL3CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL3CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL3CROP, LMRGN, __x)
+#define MCDE_OVL4CROP 0x00000490
+#define MCDE_OVL4CROP_TMRGN_SHIFT 0
+#define MCDE_OVL4CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL4CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CROP, TMRGN, __x)
+#define MCDE_OVL4CROP_LMRGN_SHIFT 22
+#define MCDE_OVL4CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL4CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL4CROP, LMRGN, __x)
+#define MCDE_OVL5CROP 0x000004B0
+#define MCDE_OVL5CROP_TMRGN_SHIFT 0
+#define MCDE_OVL5CROP_TMRGN_MASK 0x003FFFFF
+#define MCDE_OVL5CROP_TMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CROP, TMRGN, __x)
+#define MCDE_OVL5CROP_LMRGN_SHIFT 22
+#define MCDE_OVL5CROP_LMRGN_MASK 0xFFC00000
+#define MCDE_OVL5CROP_LMRGN(__x) \
+ MCDE_VAL2REG(MCDE_OVL5CROP, LMRGN, __x)
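+/*
+ * Overlay composition registers, OVL0COMP..OVL5COMP: output position
+ * (XPOS/YPOS), destination channel (CH_ID) and Z order of the overlay.
+ */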
+#define MCDE_OVL0COMP 0x00000414
+#define MCDE_OVL0COMP_GROUPOFFSET 0x20
+#define MCDE_OVL0COMP_XPOS_SHIFT 0
+#define MCDE_OVL0COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL0COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, XPOS, __x)
+#define MCDE_OVL0COMP_CH_ID_SHIFT 11
+#define MCDE_OVL0COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL0COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, CH_ID, __x)
+#define MCDE_OVL0COMP_YPOS_SHIFT 16
+#define MCDE_OVL0COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL0COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, YPOS, __x)
+#define MCDE_OVL0COMP_Z_SHIFT 27
+#define MCDE_OVL0COMP_Z_MASK 0x78000000
+#define MCDE_OVL0COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL0COMP, Z, __x)
+#define MCDE_OVL1COMP 0x00000434
+#define MCDE_OVL1COMP_XPOS_SHIFT 0
+#define MCDE_OVL1COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL1COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, XPOS, __x)
+#define MCDE_OVL1COMP_CH_ID_SHIFT 11
+#define MCDE_OVL1COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL1COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, CH_ID, __x)
+#define MCDE_OVL1COMP_YPOS_SHIFT 16
+#define MCDE_OVL1COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL1COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, YPOS, __x)
+#define MCDE_OVL1COMP_Z_SHIFT 27
+#define MCDE_OVL1COMP_Z_MASK 0x78000000
+#define MCDE_OVL1COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL1COMP, Z, __x)
+#define MCDE_OVL2COMP 0x00000454
+#define MCDE_OVL2COMP_XPOS_SHIFT 0
+#define MCDE_OVL2COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL2COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, XPOS, __x)
+#define MCDE_OVL2COMP_CH_ID_SHIFT 11
+#define MCDE_OVL2COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL2COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, CH_ID, __x)
+#define MCDE_OVL2COMP_YPOS_SHIFT 16
+#define MCDE_OVL2COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL2COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, YPOS, __x)
+#define MCDE_OVL2COMP_Z_SHIFT 27
+#define MCDE_OVL2COMP_Z_MASK 0x78000000
+#define MCDE_OVL2COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL2COMP, Z, __x)
+#define MCDE_OVL3COMP 0x00000474
+#define MCDE_OVL3COMP_XPOS_SHIFT 0
+#define MCDE_OVL3COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL3COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, XPOS, __x)
+#define MCDE_OVL3COMP_CH_ID_SHIFT 11
+#define MCDE_OVL3COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL3COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, CH_ID, __x)
+#define MCDE_OVL3COMP_YPOS_SHIFT 16
+#define MCDE_OVL3COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL3COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, YPOS, __x)
+#define MCDE_OVL3COMP_Z_SHIFT 27
+#define MCDE_OVL3COMP_Z_MASK 0x78000000
+#define MCDE_OVL3COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL3COMP, Z, __x)
+#define MCDE_OVL4COMP 0x00000494
+#define MCDE_OVL4COMP_XPOS_SHIFT 0
+#define MCDE_OVL4COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL4COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, XPOS, __x)
+#define MCDE_OVL4COMP_CH_ID_SHIFT 11
+#define MCDE_OVL4COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL4COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, CH_ID, __x)
+#define MCDE_OVL4COMP_YPOS_SHIFT 16
+#define MCDE_OVL4COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL4COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, YPOS, __x)
+#define MCDE_OVL4COMP_Z_SHIFT 27
+#define MCDE_OVL4COMP_Z_MASK 0x78000000
+#define MCDE_OVL4COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL4COMP, Z, __x)
+#define MCDE_OVL5COMP 0x000004B4
+#define MCDE_OVL5COMP_XPOS_SHIFT 0
+#define MCDE_OVL5COMP_XPOS_MASK 0x000007FF
+#define MCDE_OVL5COMP_XPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, XPOS, __x)
+#define MCDE_OVL5COMP_CH_ID_SHIFT 11
+#define MCDE_OVL5COMP_CH_ID_MASK 0x00007800
+#define MCDE_OVL5COMP_CH_ID(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, CH_ID, __x)
+#define MCDE_OVL5COMP_YPOS_SHIFT 16
+#define MCDE_OVL5COMP_YPOS_MASK 0x07FF0000
+#define MCDE_OVL5COMP_YPOS(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, YPOS, __x)
+#define MCDE_OVL5COMP_Z_SHIFT 27
+#define MCDE_OVL5COMP_Z_MASK 0x78000000
+#define MCDE_OVL5COMP_Z(__x) \
+ MCDE_VAL2REG(MCDE_OVL5COMP, Z, __x)
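+/*
+ * Channel configuration registers, CHNL0CONF..CHNL3CONF, stride
+ * MCDE_CHNL0CONF_GROUPOFFSET (0x20): channel frame size as PPL/LPF.
+ */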
+#define MCDE_CHNL0CONF 0x00000600
+#define MCDE_CHNL0CONF_GROUPOFFSET 0x20
+#define MCDE_CHNL0CONF_PPL_SHIFT 0
+#define MCDE_CHNL0CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL0CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0CONF, PPL, __x)
+#define MCDE_CHNL0CONF_LPF_SHIFT 16
+#define MCDE_CHNL0CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL0CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0CONF, LPF, __x)
+#define MCDE_CHNL1CONF 0x00000620
+#define MCDE_CHNL1CONF_PPL_SHIFT 0
+#define MCDE_CHNL1CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL1CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1CONF, PPL, __x)
+#define MCDE_CHNL1CONF_LPF_SHIFT 16
+#define MCDE_CHNL1CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL1CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1CONF, LPF, __x)
+#define MCDE_CHNL2CONF 0x00000640
+#define MCDE_CHNL2CONF_PPL_SHIFT 0
+#define MCDE_CHNL2CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL2CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2CONF, PPL, __x)
+#define MCDE_CHNL2CONF_LPF_SHIFT 16
+#define MCDE_CHNL2CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL2CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2CONF, LPF, __x)
+#define MCDE_CHNL3CONF 0x00000660
+#define MCDE_CHNL3CONF_PPL_SHIFT 0
+#define MCDE_CHNL3CONF_PPL_MASK 0x000007FF
+#define MCDE_CHNL3CONF_PPL(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3CONF, PPL, __x)
+#define MCDE_CHNL3CONF_LPF_SHIFT 16
+#define MCDE_CHNL3CONF_LPF_MASK 0x07FF0000
+#define MCDE_CHNL3CONF_LPF(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3CONF, LPF, __x)
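+/*
+ * Channel status/control registers, CHNL0STAT..CHNL3STAT: channel read
+ * (CHNLRD) and activation (CHNLA) bits, plus a blended background
+ * enable (CHNLBLBCKGND_EN).
+ */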
+#define MCDE_CHNL0STAT 0x00000604
+#define MCDE_CHNL0STAT_GROUPOFFSET 0x20
+#define MCDE_CHNL0STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL0STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL0STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLRD, __x)
+#define MCDE_CHNL0STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL0STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL0STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLA, __x)
+#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLBLBCKGND_EN, __x)
+#define MCDE_CHNL1STAT 0x00000624
+#define MCDE_CHNL1STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL1STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL1STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLRD, __x)
+#define MCDE_CHNL1STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL1STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL1STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLA, __x)
+#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLBLBCKGND_EN, __x)
+#define MCDE_CHNL2STAT 0x00000644
+#define MCDE_CHNL2STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL2STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL2STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLRD, __x)
+#define MCDE_CHNL2STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL2STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL2STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLA, __x)
+#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLBLBCKGND_EN, __x)
+#define MCDE_CHNL3STAT 0x00000664
+#define MCDE_CHNL3STAT_CHNLRD_SHIFT 0
+#define MCDE_CHNL3STAT_CHNLRD_MASK 0x00000001
+#define MCDE_CHNL3STAT_CHNLRD(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLRD, __x)
+#define MCDE_CHNL3STAT_CHNLA_SHIFT 1
+#define MCDE_CHNL3STAT_CHNLA_MASK 0x00000002
+#define MCDE_CHNL3STAT_CHNLA(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLA, __x)
+#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN_SHIFT 16
+#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN_MASK 0x00010000
+#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLBLBCKGND_EN, __x)
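+/*
+ * Channel synchronization mode, CHNL0SYNCHMOD..CHNL3SYNCHMOD: source
+ * synch mode (hardware, none or software) and the output synch source
+ * (formatter, or the TE0/TE1 tearing-effect inputs).
+ */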
+#define MCDE_CHNL0SYNCHMOD 0x00000608
+#define MCDE_CHNL0SYNCHMOD_GROUPOFFSET 0x20
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE 0
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_NO_SYNCH 1
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE0 1
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE1 2
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, OUT_SYNCH_SRC, __x)
+#define MCDE_CHNL1SYNCHMOD 0x00000628
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_HARDWARE 0
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_NO_SYNCH 1
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL1SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_TE0 1
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_TE1 2
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, OUT_SYNCH_SRC, __x)
+#define MCDE_CHNL2SYNCHMOD 0x00000648
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_HARDWARE 0
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_NO_SYNCH 1
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL2SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_TE0 1
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_TE1 2
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, OUT_SYNCH_SRC, __x)
+#define MCDE_CHNL3SYNCHMOD 0x00000668
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_SHIFT 0
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_MASK 0x00000003
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_HARDWARE 0
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_NO_SYNCH 1
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_SOFTWARE 2
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, SRC_SYNCH, \
+ MCDE_CHNL3SYNCHMOD_SRC_SYNCH_##__x)
+#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, SRC_SYNCH, __x)
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_TE0 1
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_TE1 2
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, OUT_SYNCH_SRC, \
+ MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_##__x)
+#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, OUT_SYNCH_SRC, __x)
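+/* CHNLnSYNCHSW: SW_TRIG issues the software synchronization trigger */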
+#define MCDE_CHNL0SYNCHSW 0x0000060C
+#define MCDE_CHNL0SYNCHSW_GROUPOFFSET 0x20
+#define MCDE_CHNL0SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL0SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL0SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0SYNCHSW, SW_TRIG, __x)
+#define MCDE_CHNL1SYNCHSW 0x0000062C
+#define MCDE_CHNL1SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL1SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL1SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1SYNCHSW, SW_TRIG, __x)
+#define MCDE_CHNL2SYNCHSW 0x0000064C
+#define MCDE_CHNL2SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL2SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL2SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2SYNCHSW, SW_TRIG, __x)
+#define MCDE_CHNL3SYNCHSW 0x0000066C
+#define MCDE_CHNL3SYNCHSW_SW_TRIG_SHIFT 0
+#define MCDE_CHNL3SYNCHSW_SW_TRIG_MASK 0x00000001
+#define MCDE_CHNL3SYNCHSW_SW_TRIG(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3SYNCHSW, SW_TRIG, __x)
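+/* CHNLnBCKGNDCOL: channel background color, 8-bit B/G/R components */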
+#define MCDE_CHNL0BCKGNDCOL 0x00000610
+#define MCDE_CHNL0BCKGNDCOL_GROUPOFFSET 0x20
+#define MCDE_CHNL0BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL0BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL0BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, B, __x)
+#define MCDE_CHNL0BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL0BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL0BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, G, __x)
+#define MCDE_CHNL0BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL0BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL0BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, R, __x)
+#define MCDE_CHNL1BCKGNDCOL 0x00000630
+#define MCDE_CHNL1BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL1BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL1BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, B, __x)
+#define MCDE_CHNL1BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL1BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL1BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, G, __x)
+#define MCDE_CHNL1BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL1BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL1BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, R, __x)
+#define MCDE_CHNL2BCKGNDCOL 0x00000650
+#define MCDE_CHNL2BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL2BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL2BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, B, __x)
+#define MCDE_CHNL2BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL2BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL2BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, G, __x)
+#define MCDE_CHNL2BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL2BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL2BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, R, __x)
+#define MCDE_CHNL3BCKGNDCOL 0x00000670
+#define MCDE_CHNL3BCKGNDCOL_B_SHIFT 0
+#define MCDE_CHNL3BCKGNDCOL_B_MASK 0x000000FF
+#define MCDE_CHNL3BCKGNDCOL_B(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, B, __x)
+#define MCDE_CHNL3BCKGNDCOL_G_SHIFT 8
+#define MCDE_CHNL3BCKGNDCOL_G_MASK 0x0000FF00
+#define MCDE_CHNL3BCKGNDCOL_G(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, G, __x)
+#define MCDE_CHNL3BCKGNDCOL_R_SHIFT 16
+#define MCDE_CHNL3BCKGNDCOL_R_MASK 0x00FF0000
+#define MCDE_CHNL3BCKGNDCOL_R(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, R, __x)
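+/* CHNLnMUXING: routes the channel to output FIFO A, B, C0 or C1 */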
+#define MCDE_CHNL0MUXING 0x00000614
+#define MCDE_CHNL0MUXING_GROUPOFFSET 0x20
+#define MCDE_CHNL0MUXING_FIFO_ID_SHIFT 0
+#define MCDE_CHNL0MUXING_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL0MUXING_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0MUXING, FIFO_ID, MCDE_CHNL0MUXING_FIFO_ID_##__x)
+#define MCDE_CHNL0MUXING_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL0MUXING, FIFO_ID, __x)
+#define MCDE_CHNL1MUXING 0x00000634
+#define MCDE_CHNL1MUXING_FIFO_ID_SHIFT 0
+#define MCDE_CHNL1MUXING_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL1MUXING_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1MUXING, FIFO_ID, MCDE_CHNL1MUXING_FIFO_ID_##__x)
+#define MCDE_CHNL1MUXING_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL1MUXING, FIFO_ID, __x)
+#define MCDE_CHNL2MUXING 0x00000654
+#define MCDE_CHNL2MUXING_FIFO_ID_SHIFT 0
+#define MCDE_CHNL2MUXING_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL2MUXING_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2MUXING, FIFO_ID, MCDE_CHNL2MUXING_FIFO_ID_##__x)
+#define MCDE_CHNL2MUXING_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL2MUXING, FIFO_ID, __x)
+#define MCDE_CHNL3MUXING 0x00000674
+#define MCDE_CHNL3MUXING_FIFO_ID_SHIFT 0
+#define MCDE_CHNL3MUXING_FIFO_ID_MASK 0x00000007
+#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_A 0
+#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_B 1
+#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_C0 2
+#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_C1 3
+#define MCDE_CHNL3MUXING_FIFO_ID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3MUXING, FIFO_ID, MCDE_CHNL3MUXING_FIFO_ID_##__x)
+#define MCDE_CHNL3MUXING_FIFO_ID(__x) \
+ MCDE_VAL2REG(MCDE_CHNL3MUXING, FIFO_ID, __x)
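+/*
+ * CRx0 (x = A, B; each B register sits at its A offset + 0x200): main
+ * control register 0. FLOEN presumably enables the data flow; the other
+ * bits enable blending, the anti-flicker filter, palette/gamma lookup,
+ * dithering, OLED conversion, color keying and rotation.
+ */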
+#define MCDE_CRA0 0x00000800
+#define MCDE_CRA0_GROUPOFFSET 0x200
+#define MCDE_CRA0_FLOEN_SHIFT 0
+#define MCDE_CRA0_FLOEN_MASK 0x00000001
+#define MCDE_CRA0_FLOEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLOEN, __x)
+#define MCDE_CRA0_BLENDEN_SHIFT 2
+#define MCDE_CRA0_BLENDEN_MASK 0x00000004
+#define MCDE_CRA0_BLENDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, BLENDEN, __x)
+#define MCDE_CRA0_AFLICKEN_SHIFT 3
+#define MCDE_CRA0_AFLICKEN_MASK 0x00000008
+#define MCDE_CRA0_AFLICKEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, AFLICKEN, __x)
+#define MCDE_CRA0_PALEN_SHIFT 4
+#define MCDE_CRA0_PALEN_MASK 0x00000010
+#define MCDE_CRA0_PALEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, PALEN, __x)
+#define MCDE_CRA0_DITHEN_SHIFT 5
+#define MCDE_CRA0_DITHEN_MASK 0x00000020
+#define MCDE_CRA0_DITHEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, DITHEN, __x)
+#define MCDE_CRA0_GAMEN_SHIFT 6
+#define MCDE_CRA0_GAMEN_MASK 0x00000040
+#define MCDE_CRA0_GAMEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, GAMEN, __x)
+#define MCDE_CRA0_KEYCTRL_SHIFT 7
+#define MCDE_CRA0_KEYCTRL_MASK 0x00000380
+#define MCDE_CRA0_KEYCTRL_OFF 0
+#define MCDE_CRA0_KEYCTRL_ALPHA_RGB 1
+#define MCDE_CRA0_KEYCTRL_RGB 2
+#define MCDE_CRA0_KEYCTRL_FALPHA_FRGB 4
+#define MCDE_CRA0_KEYCTRL_FRGB 5
+#define MCDE_CRA0_KEYCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, KEYCTRL, MCDE_CRA0_KEYCTRL_##__x)
+#define MCDE_CRA0_KEYCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, KEYCTRL, __x)
+#define MCDE_CRA0_BLENDCTRL_SHIFT 10
+#define MCDE_CRA0_BLENDCTRL_MASK 0x00000400
+#define MCDE_CRA0_BLENDCTRL_SOURCE 0
+#define MCDE_CRA0_BLENDCTRL_CONSTANT 1
+#define MCDE_CRA0_BLENDCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, BLENDCTRL, MCDE_CRA0_BLENDCTRL_##__x)
+#define MCDE_CRA0_BLENDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, BLENDCTRL, __x)
+#define MCDE_CRA0_FLICKMODE_SHIFT 11
+#define MCDE_CRA0_FLICKMODE_MASK 0x00001800
+#define MCDE_CRA0_FLICKMODE_FORCE_FILTER_0 0
+#define MCDE_CRA0_FLICKMODE_ADAPTIVE 1
+#define MCDE_CRA0_FLICKMODE_TEST_MODE 2
+#define MCDE_CRA0_FLICKMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLICKMODE, MCDE_CRA0_FLICKMODE_##__x)
+#define MCDE_CRA0_FLICKMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLICKMODE, __x)
+#define MCDE_CRA0_FLOCKFORMAT_SHIFT 13
+#define MCDE_CRA0_FLOCKFORMAT_MASK 0x00002000
+#define MCDE_CRA0_FLOCKFORMAT_YCBCR 0
+#define MCDE_CRA0_FLOCKFORMAT_RGB 1
+#define MCDE_CRA0_FLOCKFORMAT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLOCKFORMAT, MCDE_CRA0_FLOCKFORMAT_##__x)
+#define MCDE_CRA0_FLOCKFORMAT(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, FLOCKFORMAT, __x)
+#define MCDE_CRA0_PALMODE_SHIFT 14
+#define MCDE_CRA0_PALMODE_MASK 0x00004000
+#define MCDE_CRA0_PALMODE_PALETTE 0
+#define MCDE_CRA0_PALMODE_GAMMA 1
+#define MCDE_CRA0_PALMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, PALMODE, __x)
+#define MCDE_CRA0_OLEDEN_SHIFT 15
+#define MCDE_CRA0_OLEDEN_MASK 0x00008000
+#define MCDE_CRA0_OLEDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, OLEDEN, __x)
+#define MCDE_CRA0_ALPHABLEND_SHIFT 16
+#define MCDE_CRA0_ALPHABLEND_MASK 0x00FF0000
+#define MCDE_CRA0_ALPHABLEND(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, ALPHABLEND, __x)
+#define MCDE_CRA0_ROTEN_SHIFT 24
+#define MCDE_CRA0_ROTEN_MASK 0x01000000
+#define MCDE_CRA0_ROTEN(__x) \
+ MCDE_VAL2REG(MCDE_CRA0, ROTEN, __x)
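+/*
+ * Illustrative sketch only, not part of the register map: the field
+ * macros above are meant to be OR:ed together into a register word, e.g.
+ *
+ *	u32 val = MCDE_CRA0_FLOEN(1) |
+ *		  MCDE_CRA0_BLENDCTRL_ENUM(SOURCE) |
+ *		  MCDE_CRA0_ALPHABLEND(0xff);
+ *
+ * with MCDE_VAL2REG(), presumably defined earlier in this header, doing
+ * the shift-and-mask.
+ */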
+#define MCDE_CRB0 0x00000A00
+#define MCDE_CRB0_FLOEN_SHIFT 0
+#define MCDE_CRB0_FLOEN_MASK 0x00000001
+#define MCDE_CRB0_FLOEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLOEN, __x)
+#define MCDE_CRB0_BLENDEN_SHIFT 2
+#define MCDE_CRB0_BLENDEN_MASK 0x00000004
+#define MCDE_CRB0_BLENDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, BLENDEN, __x)
+#define MCDE_CRB0_AFLICKEN_SHIFT 3
+#define MCDE_CRB0_AFLICKEN_MASK 0x00000008
+#define MCDE_CRB0_AFLICKEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, AFLICKEN, __x)
+#define MCDE_CRB0_PALEN_SHIFT 4
+#define MCDE_CRB0_PALEN_MASK 0x00000010
+#define MCDE_CRB0_PALEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, PALEN, __x)
+#define MCDE_CRB0_DITHEN_SHIFT 5
+#define MCDE_CRB0_DITHEN_MASK 0x00000020
+#define MCDE_CRB0_DITHEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, DITHEN, __x)
+#define MCDE_CRB0_GAMEN_SHIFT 6
+#define MCDE_CRB0_GAMEN_MASK 0x00000040
+#define MCDE_CRB0_GAMEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, GAMEN, __x)
+#define MCDE_CRB0_KEYCTRL_SHIFT 7
+#define MCDE_CRB0_KEYCTRL_MASK 0x00000380
+#define MCDE_CRB0_KEYCTRL_OFF 0
+#define MCDE_CRB0_KEYCTRL_ALPHA_RGB 1
+#define MCDE_CRB0_KEYCTRL_RGB 2
+#define MCDE_CRB0_KEYCTRL_FALPHA_FRGB 4
+#define MCDE_CRB0_KEYCTRL_FRGB 5
+#define MCDE_CRB0_KEYCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, KEYCTRL, MCDE_CRB0_KEYCTRL_##__x)
+#define MCDE_CRB0_KEYCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, KEYCTRL, __x)
+#define MCDE_CRB0_BLENDCTRL_SHIFT 10
+#define MCDE_CRB0_BLENDCTRL_MASK 0x00000400
+#define MCDE_CRB0_BLENDCTRL_SOURCE 0
+#define MCDE_CRB0_BLENDCTRL_CONSTANT 1
+#define MCDE_CRB0_BLENDCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, BLENDCTRL, MCDE_CRB0_BLENDCTRL_##__x)
+#define MCDE_CRB0_BLENDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, BLENDCTRL, __x)
+#define MCDE_CRB0_FLICKMODE_SHIFT 11
+#define MCDE_CRB0_FLICKMODE_MASK 0x00001800
+#define MCDE_CRB0_FLICKMODE_FORCE_FILTER_0 0
+#define MCDE_CRB0_FLICKMODE_ADAPTIVE 1
+#define MCDE_CRB0_FLICKMODE_TEST_MODE 2
+#define MCDE_CRB0_FLICKMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLICKMODE, MCDE_CRB0_FLICKMODE_##__x)
+#define MCDE_CRB0_FLICKMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLICKMODE, __x)
+#define MCDE_CRB0_FLOCKFORMAT_SHIFT 13
+#define MCDE_CRB0_FLOCKFORMAT_MASK 0x00002000
+#define MCDE_CRB0_FLOCKFORMAT_YCBCR 0
+#define MCDE_CRB0_FLOCKFORMAT_RGB 1
+#define MCDE_CRB0_FLOCKFORMAT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLOCKFORMAT, MCDE_CRB0_FLOCKFORMAT_##__x)
+#define MCDE_CRB0_FLOCKFORMAT(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, FLOCKFORMAT, __x)
+#define MCDE_CRB0_PALMODE_SHIFT 14
+#define MCDE_CRB0_PALMODE_MASK 0x00004000
+#define MCDE_CRB0_PALMODE_PALETTE 0
+#define MCDE_CRB0_PALMODE_GAMMA 1
+#define MCDE_CRB0_PALMODE(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, PALMODE, __x)
+#define MCDE_CRB0_OLEDEN_SHIFT 15
+#define MCDE_CRB0_OLEDEN_MASK 0x00008000
+#define MCDE_CRB0_OLEDEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, OLEDEN, __x)
+#define MCDE_CRB0_ALPHABLEND_SHIFT 16
+#define MCDE_CRB0_ALPHABLEND_MASK 0x00FF0000
+#define MCDE_CRB0_ALPHABLEND(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, ALPHABLEND, __x)
+#define MCDE_CRB0_ROTEN_SHIFT 24
+#define MCDE_CRB0_ROTEN_MASK 0x01000000
+#define MCDE_CRB0_ROTEN(__x) \
+ MCDE_VAL2REG(MCDE_CRB0, ROTEN, __x)
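+/*
+ * CRx1: pixel clock divider (PCD) and clock source (CLKSEL), color depth
+ * window (CDWIN) and output bits per pixel (OUTBPP); BCD presumably
+ * bypasses the clock divider.
+ */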
+#define MCDE_CRA1 0x00000804
+#define MCDE_CRA1_GROUPOFFSET 0x200
+#define MCDE_CRA1_PCD_SHIFT 0
+#define MCDE_CRA1_PCD_MASK 0x000003FF
+#define MCDE_CRA1_PCD(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, PCD, __x)
+#define MCDE_CRA1_CLKSEL_SHIFT 10
+#define MCDE_CRA1_CLKSEL_MASK 0x00001C00
+#define MCDE_CRA1_CLKSEL_CLKPLL72 0
+#define MCDE_CRA1_CLKSEL_CLKPLL27 2
+#define MCDE_CRA1_CLKSEL_TV1CLK 3
+#define MCDE_CRA1_CLKSEL_TV2CLK 4
+#define MCDE_CRA1_CLKSEL_MCDECLK 5
+#define MCDE_CRA1_CLKSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKSEL, MCDE_CRA1_CLKSEL_##__x)
+#define MCDE_CRA1_CLKSEL(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKSEL, __x)
+#define MCDE_CRA1_CDWIN_SHIFT 13
+#define MCDE_CRA1_CDWIN_MASK 0x0001E000
+#define MCDE_CRA1_CDWIN_8BPP_C1 0
+#define MCDE_CRA1_CDWIN_12BPP_C1 1
+#define MCDE_CRA1_CDWIN_12BPP_C2 2
+#define MCDE_CRA1_CDWIN_16BPP_C1 3
+#define MCDE_CRA1_CDWIN_16BPP_C2 4
+#define MCDE_CRA1_CDWIN_16BPP_C3 5
+#define MCDE_CRA1_CDWIN_18BPP_C1 6
+#define MCDE_CRA1_CDWIN_18BPP_C2 7
+#define MCDE_CRA1_CDWIN_24BPP 8
+#define MCDE_CRA1_CDWIN_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CDWIN, MCDE_CRA1_CDWIN_##__x)
+#define MCDE_CRA1_CDWIN(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CDWIN, __x)
+#define MCDE_CRA1_OUTBPP_SHIFT 25
+#define MCDE_CRA1_OUTBPP_MASK 0x1E000000
+#define MCDE_CRA1_OUTBPP_MONO1 0
+#define MCDE_CRA1_OUTBPP_MONO2 1
+#define MCDE_CRA1_OUTBPP_MONO4 2
+#define MCDE_CRA1_OUTBPP_MONO8 3
+#define MCDE_CRA1_OUTBPP_8BPP 4
+#define MCDE_CRA1_OUTBPP_12BPP 5
+#define MCDE_CRA1_OUTBPP_15BPP 6
+#define MCDE_CRA1_OUTBPP_16BPP 7
+#define MCDE_CRA1_OUTBPP_18BPP 8
+#define MCDE_CRA1_OUTBPP_24BPP 9
+#define MCDE_CRA1_OUTBPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, OUTBPP, MCDE_CRA1_OUTBPP_##__x)
+#define MCDE_CRA1_OUTBPP(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, OUTBPP, __x)
+#define MCDE_CRA1_BCD_SHIFT 29
+#define MCDE_CRA1_BCD_MASK 0x20000000
+#define MCDE_CRA1_BCD(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, BCD, __x)
+#define MCDE_CRA1_CLKTYPE_SHIFT 30
+#define MCDE_CRA1_CLKTYPE_MASK 0x40000000
+#define MCDE_CRA1_CLKTYPE_TVXCLKSEL0 0
+#define MCDE_CRA1_CLKTYPE_TVXCLKSEL1 1
+#define MCDE_CRA1_CLKTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKTYPE, MCDE_CRA1_CLKTYPE_##__x)
+#define MCDE_CRA1_CLKTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CRA1, CLKTYPE, __x)
+#define MCDE_CRB1 0x00000A04
+#define MCDE_CRB1_PCD_SHIFT 0
+#define MCDE_CRB1_PCD_MASK 0x000003FF
+#define MCDE_CRB1_PCD(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, PCD, __x)
+#define MCDE_CRB1_CLKSEL_SHIFT 10
+#define MCDE_CRB1_CLKSEL_MASK 0x00001C00
+#define MCDE_CRB1_CLKSEL_CLKPLL72 0
+#define MCDE_CRB1_CLKSEL_CLKPLL27 2
+#define MCDE_CRB1_CLKSEL_TV1CLK 3
+#define MCDE_CRB1_CLKSEL_TV2CLK 4
+#define MCDE_CRB1_CLKSEL_MCDECLK 5
+#define MCDE_CRB1_CLKSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKSEL, MCDE_CRB1_CLKSEL_##__x)
+#define MCDE_CRB1_CLKSEL(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKSEL, __x)
+#define MCDE_CRB1_CDWIN_SHIFT 13
+#define MCDE_CRB1_CDWIN_MASK 0x0001E000
+#define MCDE_CRB1_CDWIN_8BPP_C1 0
+#define MCDE_CRB1_CDWIN_12BPP_C1 1
+#define MCDE_CRB1_CDWIN_12BPP_C2 2
+#define MCDE_CRB1_CDWIN_16BPP_C1 3
+#define MCDE_CRB1_CDWIN_16BPP_C2 4
+#define MCDE_CRB1_CDWIN_16BPP_C3 5
+#define MCDE_CRB1_CDWIN_18BPP_C1 6
+#define MCDE_CRB1_CDWIN_18BPP_C2 7
+#define MCDE_CRB1_CDWIN_24BPP 8
+#define MCDE_CRB1_CDWIN_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CDWIN, MCDE_CRB1_CDWIN_##__x)
+#define MCDE_CRB1_CDWIN(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CDWIN, __x)
+#define MCDE_CRB1_OUTBPP_SHIFT 25
+#define MCDE_CRB1_OUTBPP_MASK 0x1E000000
+#define MCDE_CRB1_OUTBPP_MONO1 0
+#define MCDE_CRB1_OUTBPP_MONO2 1
+#define MCDE_CRB1_OUTBPP_MONO4 2
+#define MCDE_CRB1_OUTBPP_MONO8 3
+#define MCDE_CRB1_OUTBPP_8BPP 4
+#define MCDE_CRB1_OUTBPP_12BPP 5
+#define MCDE_CRB1_OUTBPP_15BPP 6
+#define MCDE_CRB1_OUTBPP_16BPP 7
+#define MCDE_CRB1_OUTBPP_18BPP 8
+#define MCDE_CRB1_OUTBPP_24BPP 9
+#define MCDE_CRB1_OUTBPP_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, OUTBPP, MCDE_CRB1_OUTBPP_##__x)
+#define MCDE_CRB1_OUTBPP(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, OUTBPP, __x)
+#define MCDE_CRB1_BCD_SHIFT 29
+#define MCDE_CRB1_BCD_MASK 0x20000000
+#define MCDE_CRB1_BCD(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, BCD, __x)
+#define MCDE_CRB1_CLKTYPE_SHIFT 30
+#define MCDE_CRB1_CLKTYPE_MASK 0x40000000
+#define MCDE_CRB1_CLKTYPE_TVXCLKSEL0 0
+#define MCDE_CRB1_CLKTYPE_TVXCLKSEL1 1
+#define MCDE_CRB1_CLKTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKTYPE, MCDE_CRB1_CLKTYPE_##__x)
+#define MCDE_CRB1_CLKTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CRB1, CLKTYPE, __x)
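+/*
+ * COLKEYx and FCOLKEYx ("F" presumably "forced"): color key values as
+ * ARGB8888 components.
+ */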
+#define MCDE_COLKEYA 0x00000808
+#define MCDE_COLKEYA_GROUPOFFSET 0x200
+#define MCDE_COLKEYA_KEYB_SHIFT 0
+#define MCDE_COLKEYA_KEYB_MASK 0x000000FF
+#define MCDE_COLKEYA_KEYB(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYB, __x)
+#define MCDE_COLKEYA_KEYG_SHIFT 8
+#define MCDE_COLKEYA_KEYG_MASK 0x0000FF00
+#define MCDE_COLKEYA_KEYG(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYG, __x)
+#define MCDE_COLKEYA_KEYR_SHIFT 16
+#define MCDE_COLKEYA_KEYR_MASK 0x00FF0000
+#define MCDE_COLKEYA_KEYR(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYR, __x)
+#define MCDE_COLKEYA_KEYA_SHIFT 24
+#define MCDE_COLKEYA_KEYA_MASK 0xFF000000
+#define MCDE_COLKEYA_KEYA(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYA, KEYA, __x)
+#define MCDE_COLKEYB 0x00000A08
+#define MCDE_COLKEYB_KEYB_SHIFT 0
+#define MCDE_COLKEYB_KEYB_MASK 0x000000FF
+#define MCDE_COLKEYB_KEYB(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYB, __x)
+#define MCDE_COLKEYB_KEYG_SHIFT 8
+#define MCDE_COLKEYB_KEYG_MASK 0x0000FF00
+#define MCDE_COLKEYB_KEYG(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYG, __x)
+#define MCDE_COLKEYB_KEYR_SHIFT 16
+#define MCDE_COLKEYB_KEYR_MASK 0x00FF0000
+#define MCDE_COLKEYB_KEYR(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYR, __x)
+#define MCDE_COLKEYB_KEYA_SHIFT 24
+#define MCDE_COLKEYB_KEYA_MASK 0xFF000000
+#define MCDE_COLKEYB_KEYA(__x) \
+ MCDE_VAL2REG(MCDE_COLKEYB, KEYA, __x)
+#define MCDE_FCOLKEYA 0x0000080C
+#define MCDE_FCOLKEYA_GROUPOFFSET 0x200
+#define MCDE_FCOLKEYA_FKEYB_SHIFT 0
+#define MCDE_FCOLKEYA_FKEYB_MASK 0x000000FF
+#define MCDE_FCOLKEYA_FKEYB(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYB, __x)
+#define MCDE_FCOLKEYA_FKEYG_SHIFT 8
+#define MCDE_FCOLKEYA_FKEYG_MASK 0x0000FF00
+#define MCDE_FCOLKEYA_FKEYG(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYG, __x)
+#define MCDE_FCOLKEYA_FKEYR_SHIFT 16
+#define MCDE_FCOLKEYA_FKEYR_MASK 0x00FF0000
+#define MCDE_FCOLKEYA_FKEYR(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYR, __x)
+#define MCDE_FCOLKEYA_FKEYA_SHIFT 24
+#define MCDE_FCOLKEYA_FKEYA_MASK 0xFF000000
+#define MCDE_FCOLKEYA_FKEYA(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYA, __x)
+#define MCDE_FCOLKEYB 0x00000A0C
+#define MCDE_FCOLKEYB_FKEYB_SHIFT 0
+#define MCDE_FCOLKEYB_FKEYB_MASK 0x000000FF
+#define MCDE_FCOLKEYB_FKEYB(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYB, __x)
+#define MCDE_FCOLKEYB_FKEYG_SHIFT 8
+#define MCDE_FCOLKEYB_FKEYG_MASK 0x0000FF00
+#define MCDE_FCOLKEYB_FKEYG(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYG, __x)
+#define MCDE_FCOLKEYB_FKEYR_SHIFT 16
+#define MCDE_FCOLKEYB_FKEYR_MASK 0x00FF0000
+#define MCDE_FCOLKEYB_FKEYR(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYR, __x)
+#define MCDE_FCOLKEYB_FKEYA_SHIFT 24
+#define MCDE_FCOLKEYB_FKEYA_MASK 0xFF000000
+#define MCDE_FCOLKEYB_FKEYA(__x) \
+ MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYA, __x)
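+/*
+ * RGBCONV1..6: color-space conversion, presumably the YCbCr<->RGB matrix
+ * coefficients (YR_*, CR_*, CB_*) and per-component offsets (OFF_*),
+ * 11 bits each.
+ */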
+#define MCDE_RGBCONV1A 0x00000810
+#define MCDE_RGBCONV1A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV1A_YR_GREEN_SHIFT 0
+#define MCDE_RGBCONV1A_YR_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV1A_YR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1A, YR_GREEN, __x)
+#define MCDE_RGBCONV1A_YR_RED_SHIFT 16
+#define MCDE_RGBCONV1A_YR_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV1A_YR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1A, YR_RED, __x)
+#define MCDE_RGBCONV1B 0x00000A10
+#define MCDE_RGBCONV1B_YR_GREEN_SHIFT 0
+#define MCDE_RGBCONV1B_YR_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV1B_YR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1B, YR_GREEN, __x)
+#define MCDE_RGBCONV1B_YR_RED_SHIFT 16
+#define MCDE_RGBCONV1B_YR_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV1B_YR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV1B, YR_RED, __x)
+#define MCDE_RGBCONV2A 0x00000814
+#define MCDE_RGBCONV2A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV2A_CR_RED_SHIFT 0
+#define MCDE_RGBCONV2A_CR_RED_MASK 0x000007FF
+#define MCDE_RGBCONV2A_CR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2A, CR_RED, __x)
+#define MCDE_RGBCONV2A_YR_BLUE_SHIFT 16
+#define MCDE_RGBCONV2A_YR_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV2A_YR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2A, YR_BLUE, __x)
+#define MCDE_RGBCONV2B 0x00000A14
+#define MCDE_RGBCONV2B_CR_RED_SHIFT 0
+#define MCDE_RGBCONV2B_CR_RED_MASK 0x000007FF
+#define MCDE_RGBCONV2B_CR_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2B, CR_RED, __x)
+#define MCDE_RGBCONV2B_YR_BLUE_SHIFT 16
+#define MCDE_RGBCONV2B_YR_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV2B_YR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV2B, YR_BLUE, __x)
+#define MCDE_RGBCONV3A 0x00000818
+#define MCDE_RGBCONV3A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV3A_CR_BLUE_SHIFT 0
+#define MCDE_RGBCONV3A_CR_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV3A_CR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3A, CR_BLUE, __x)
+#define MCDE_RGBCONV3A_CR_GREEN_SHIFT 16
+#define MCDE_RGBCONV3A_CR_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV3A_CR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3A, CR_GREEN, __x)
+#define MCDE_RGBCONV3B 0x00000A18
+#define MCDE_RGBCONV3B_CR_BLUE_SHIFT 0
+#define MCDE_RGBCONV3B_CR_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV3B_CR_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3B, CR_BLUE, __x)
+#define MCDE_RGBCONV3B_CR_GREEN_SHIFT 16
+#define MCDE_RGBCONV3B_CR_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV3B_CR_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV3B, CR_GREEN, __x)
+#define MCDE_RGBCONV4A 0x0000081C
+#define MCDE_RGBCONV4A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV4A_CB_GREEN_SHIFT 0
+#define MCDE_RGBCONV4A_CB_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV4A_CB_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4A, CB_GREEN, __x)
+#define MCDE_RGBCONV4A_CB_RED_SHIFT 16
+#define MCDE_RGBCONV4A_CB_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV4A_CB_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4A, CB_RED, __x)
+#define MCDE_RGBCONV4B 0x00000A1C
+#define MCDE_RGBCONV4B_CB_GREEN_SHIFT 0
+#define MCDE_RGBCONV4B_CB_GREEN_MASK 0x000007FF
+#define MCDE_RGBCONV4B_CB_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4B, CB_GREEN, __x)
+#define MCDE_RGBCONV4B_CB_RED_SHIFT 16
+#define MCDE_RGBCONV4B_CB_RED_MASK 0x07FF0000
+#define MCDE_RGBCONV4B_CB_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV4B, CB_RED, __x)
+#define MCDE_RGBCONV5A 0x00000820
+#define MCDE_RGBCONV5A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV5A_OFF_RED_SHIFT 0
+#define MCDE_RGBCONV5A_OFF_RED_MASK 0x000007FF
+#define MCDE_RGBCONV5A_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5A, OFF_RED, __x)
+#define MCDE_RGBCONV5A_CB_BLUE_SHIFT 16
+#define MCDE_RGBCONV5A_CB_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV5A_CB_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5A, CB_BLUE, __x)
+#define MCDE_RGBCONV5B 0x00000A20
+#define MCDE_RGBCONV5B_OFF_RED_SHIFT 0
+#define MCDE_RGBCONV5B_OFF_RED_MASK 0x000007FF
+#define MCDE_RGBCONV5B_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5B, OFF_RED, __x)
+#define MCDE_RGBCONV5B_CB_BLUE_SHIFT 16
+#define MCDE_RGBCONV5B_CB_BLUE_MASK 0x07FF0000
+#define MCDE_RGBCONV5B_CB_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV5B, CB_BLUE, __x)
+#define MCDE_RGBCONV6A 0x00000824
+#define MCDE_RGBCONV6A_GROUPOFFSET 0x200
+#define MCDE_RGBCONV6A_OFF_BLUE_SHIFT 0
+#define MCDE_RGBCONV6A_OFF_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV6A_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6A, OFF_BLUE, __x)
+#define MCDE_RGBCONV6A_OFF_GREEN_SHIFT 16
+#define MCDE_RGBCONV6A_OFF_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV6A_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6A, OFF_GREEN, __x)
+#define MCDE_RGBCONV6B 0x00000A24
+#define MCDE_RGBCONV6B_OFF_BLUE_SHIFT 0
+#define MCDE_RGBCONV6B_OFF_BLUE_MASK 0x000007FF
+#define MCDE_RGBCONV6B_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6B, OFF_BLUE, __x)
+#define MCDE_RGBCONV6B_OFF_GREEN_SHIFT 16
+#define MCDE_RGBCONV6B_OFF_GREEN_MASK 0x07FF0000
+#define MCDE_RGBCONV6B_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_RGBCONV6B, OFF_GREEN, __x)
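+/*
+ * FFCOEF0..2: presumably the anti-flicker filter coefficients, three
+ * taps (COEFFn_N1..N3) plus a 4-bit Tn field per register.
+ */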
+#define MCDE_FFCOEF0 0x00000828
+#define MCDE_FFCOEF0_COEFF0_N1_SHIFT 0
+#define MCDE_FFCOEF0_COEFF0_N1_MASK 0x000000FF
+#define MCDE_FFCOEF0_COEFF0_N1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N1, __x)
+#define MCDE_FFCOEF0_COEFF0_N2_SHIFT 8
+#define MCDE_FFCOEF0_COEFF0_N2_MASK 0x0000FF00
+#define MCDE_FFCOEF0_COEFF0_N2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N2, __x)
+#define MCDE_FFCOEF0_COEFF0_N3_SHIFT 16
+#define MCDE_FFCOEF0_COEFF0_N3_MASK 0x00FF0000
+#define MCDE_FFCOEF0_COEFF0_N3(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N3, __x)
+#define MCDE_FFCOEF0_T0_SHIFT 24
+#define MCDE_FFCOEF0_T0_MASK 0x0F000000
+#define MCDE_FFCOEF0_T0(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF0, T0, __x)
+#define MCDE_FFCOEF1 0x0000082C
+#define MCDE_FFCOEF1_COEFF1_N1_SHIFT 0
+#define MCDE_FFCOEF1_COEFF1_N1_MASK 0x000000FF
+#define MCDE_FFCOEF1_COEFF1_N1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N1, __x)
+#define MCDE_FFCOEF1_COEFF1_N2_SHIFT 8
+#define MCDE_FFCOEF1_COEFF1_N2_MASK 0x0000FF00
+#define MCDE_FFCOEF1_COEFF1_N2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N2, __x)
+#define MCDE_FFCOEF1_COEFF1_N3_SHIFT 16
+#define MCDE_FFCOEF1_COEFF1_N3_MASK 0x00FF0000
+#define MCDE_FFCOEF1_COEFF1_N3(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N3, __x)
+#define MCDE_FFCOEF1_T1_SHIFT 24
+#define MCDE_FFCOEF1_T1_MASK 0x0F000000
+#define MCDE_FFCOEF1_T1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF1, T1, __x)
+#define MCDE_FFCOEF2 0x00000830
+#define MCDE_FFCOEF2_COEFF2_N1_SHIFT 0
+#define MCDE_FFCOEF2_COEFF2_N1_MASK 0x000000FF
+#define MCDE_FFCOEF2_COEFF2_N1(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N1, __x)
+#define MCDE_FFCOEF2_COEFF2_N2_SHIFT 8
+#define MCDE_FFCOEF2_COEFF2_N2_MASK 0x0000FF00
+#define MCDE_FFCOEF2_COEFF2_N2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N2, __x)
+#define MCDE_FFCOEF2_COEFF2_N3_SHIFT 16
+#define MCDE_FFCOEF2_COEFF2_N3_MASK 0x00FF0000
+#define MCDE_FFCOEF2_COEFF2_N3(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N3, __x)
+#define MCDE_FFCOEF2_T2_SHIFT 24
+#define MCDE_FFCOEF2_T2_MASK 0x0F000000
+#define MCDE_FFCOEF2_T2(__x) \
+ MCDE_VAL2REG(MCDE_FFCOEF2, T2, __x)
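+/*
+ * MCDE_WDATAx: display write-data register; DC presumably selects data
+ * vs. command for the 24-bit DATAVALUE.
+ */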
+#define MCDE_MCDE_WDATAA 0x00000834
+#define MCDE_MCDE_WDATAA_GROUPOFFSET 0x200
+#define MCDE_MCDE_WDATAA_DATAVALUE_SHIFT 0
+#define MCDE_MCDE_WDATAA_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_MCDE_WDATAA_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAA, DATAVALUE, __x)
+#define MCDE_MCDE_WDATAA_DC_SHIFT 24
+#define MCDE_MCDE_WDATAA_DC_MASK 0x01000000
+#define MCDE_MCDE_WDATAA_DC(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAA, DC, __x)
+#define MCDE_MCDE_WDATAB 0x00000A34
+#define MCDE_MCDE_WDATAB_DATAVALUE_SHIFT 0
+#define MCDE_MCDE_WDATAB_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_MCDE_WDATAB_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAB, DATAVALUE, __x)
+#define MCDE_MCDE_WDATAB_DC_SHIFT 24
+#define MCDE_MCDE_WDATAB_DC_MASK 0x01000000
+#define MCDE_MCDE_WDATAB_DC(__x) \
+ MCDE_VAL2REG(MCDE_MCDE_WDATAB, DC, __x)
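+/*
+ * TVCRx: TV-out control; LCD/TV select, interlace enable and field bit,
+ * TVMODE (656P presumably ITU-R BT.656, with LE/BE byte order), SDTV
+ * sample ordering, averaging and clock inversion.
+ */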
+#define MCDE_TVCRA 0x00000838
+#define MCDE_TVCRA_GROUPOFFSET 0x200
+#define MCDE_TVCRA_SEL_MOD_SHIFT 0
+#define MCDE_TVCRA_SEL_MOD_MASK 0x00000001
+#define MCDE_TVCRA_SEL_MOD_LCD 0
+#define MCDE_TVCRA_SEL_MOD_TV 1
+#define MCDE_TVCRA_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SEL_MOD, MCDE_TVCRA_SEL_MOD_##__x)
+#define MCDE_TVCRA_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SEL_MOD, __x)
+#define MCDE_TVCRA_INTEREN_SHIFT 1
+#define MCDE_TVCRA_INTEREN_MASK 0x00000002
+#define MCDE_TVCRA_INTEREN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, INTEREN, __x)
+#define MCDE_TVCRA_IFIELD_SHIFT 2
+#define MCDE_TVCRA_IFIELD_MASK 0x00000004
+#define MCDE_TVCRA_IFIELD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, IFIELD, __x)
+#define MCDE_TVCRA_TVMODE_SHIFT 3
+#define MCDE_TVCRA_TVMODE_MASK 0x00000038
+#define MCDE_TVCRA_TVMODE_SDTV_656P 0
+#define MCDE_TVCRA_TVMODE_SDTV_656P_LE 3
+#define MCDE_TVCRA_TVMODE_SDTV_656P_BE 4
+#define MCDE_TVCRA_TVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, TVMODE, MCDE_TVCRA_TVMODE_##__x)
+#define MCDE_TVCRA_TVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, TVMODE, __x)
+#define MCDE_TVCRA_SDTVMODE_SHIFT 6
+#define MCDE_TVCRA_SDTVMODE_MASK 0x000000C0
+#define MCDE_TVCRA_SDTVMODE_Y0CBY1CR 0
+#define MCDE_TVCRA_SDTVMODE_CBY0CRY1 1
+#define MCDE_TVCRA_SDTVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SDTVMODE, MCDE_TVCRA_SDTVMODE_##__x)
+#define MCDE_TVCRA_SDTVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, SDTVMODE, __x)
+#define MCDE_TVCRA_AVRGEN_SHIFT 8
+#define MCDE_TVCRA_AVRGEN_MASK 0x00000100
+#define MCDE_TVCRA_AVRGEN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, AVRGEN, __x)
+#define MCDE_TVCRA_CKINV_SHIFT 9
+#define MCDE_TVCRA_CKINV_MASK 0x00000200
+#define MCDE_TVCRA_CKINV(__x) \
+ MCDE_VAL2REG(MCDE_TVCRA, CKINV, __x)
+#define MCDE_TVCRB 0x00000A38
+#define MCDE_TVCRB_SEL_MOD_SHIFT 0
+#define MCDE_TVCRB_SEL_MOD_MASK 0x00000001
+#define MCDE_TVCRB_SEL_MOD_LCD 0
+#define MCDE_TVCRB_SEL_MOD_TV 1
+#define MCDE_TVCRB_SEL_MOD_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SEL_MOD, MCDE_TVCRB_SEL_MOD_##__x)
+#define MCDE_TVCRB_SEL_MOD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SEL_MOD, __x)
+#define MCDE_TVCRB_INTEREN_SHIFT 1
+#define MCDE_TVCRB_INTEREN_MASK 0x00000002
+#define MCDE_TVCRB_INTEREN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, INTEREN, __x)
+#define MCDE_TVCRB_IFIELD_SHIFT 2
+#define MCDE_TVCRB_IFIELD_MASK 0x00000004
+#define MCDE_TVCRB_IFIELD(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, IFIELD, __x)
+#define MCDE_TVCRB_TVMODE_SHIFT 3
+#define MCDE_TVCRB_TVMODE_MASK 0x00000038
+#define MCDE_TVCRB_TVMODE_SDTV_656P 0
+#define MCDE_TVCRB_TVMODE_SDTV_656P_LE 3
+#define MCDE_TVCRB_TVMODE_SDTV_656P_BE 4
+#define MCDE_TVCRB_TVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, TVMODE, MCDE_TVCRB_TVMODE_##__x)
+#define MCDE_TVCRB_TVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, TVMODE, __x)
+#define MCDE_TVCRB_SDTVMODE_SHIFT 6
+#define MCDE_TVCRB_SDTVMODE_MASK 0x000000C0
+#define MCDE_TVCRB_SDTVMODE_Y0CBY1CR 0
+#define MCDE_TVCRB_SDTVMODE_CBY0CRY1 1
+#define MCDE_TVCRB_SDTVMODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SDTVMODE, MCDE_TVCRB_SDTVMODE_##__x)
+#define MCDE_TVCRB_SDTVMODE(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, SDTVMODE, __x)
+#define MCDE_TVCRB_AVRGEN_SHIFT 8
+#define MCDE_TVCRB_AVRGEN_MASK 0x00000100
+#define MCDE_TVCRB_AVRGEN(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, AVRGEN, __x)
+#define MCDE_TVCRB_CKINV_SHIFT 9
+#define MCDE_TVCRB_CKINV_MASK 0x00000200
+#define MCDE_TVCRB_CKINV(__x) \
+ MCDE_VAL2REG(MCDE_TVCRB, CKINV, __x)
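+/*
+ * TVBLn/TVISL/TVDVO/TVTIM1/TVLBALW/TVBLU: TV-out timing; blanking
+ * start/end lines (BSL/BEL), field start lines (FSL), vertical and
+ * horizontal offsets, line-blanking and active-line widths (LBW/ALW)
+ * and, presumably, the blanking levels for luma and chroma
+ * (TVBLU/TVBCB/TVBCR).
+ */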
+#define MCDE_TVBL1A 0x0000083C
+#define MCDE_TVBL1A_GROUPOFFSET 0x200
+#define MCDE_TVBL1A_BEL1_SHIFT 0
+#define MCDE_TVBL1A_BEL1_MASK 0x000007FF
+#define MCDE_TVBL1A_BEL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1A, BEL1, __x)
+#define MCDE_TVBL1A_BSL1_SHIFT 16
+#define MCDE_TVBL1A_BSL1_MASK 0x07FF0000
+#define MCDE_TVBL1A_BSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1A, BSL1, __x)
+#define MCDE_TVBL1B 0x00000A3C
+#define MCDE_TVBL1B_BEL1_SHIFT 0
+#define MCDE_TVBL1B_BEL1_MASK 0x000007FF
+#define MCDE_TVBL1B_BEL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1B, BEL1, __x)
+#define MCDE_TVBL1B_BSL1_SHIFT 16
+#define MCDE_TVBL1B_BSL1_MASK 0x07FF0000
+#define MCDE_TVBL1B_BSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVBL1B, BSL1, __x)
+#define MCDE_TVISLA 0x00000840
+#define MCDE_TVISLA_GROUPOFFSET 0x200
+#define MCDE_TVISLA_FSL1_SHIFT 0
+#define MCDE_TVISLA_FSL1_MASK 0x000007FF
+#define MCDE_TVISLA_FSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVISLA, FSL1, __x)
+#define MCDE_TVISLA_FSL2_SHIFT 16
+#define MCDE_TVISLA_FSL2_MASK 0x07FF0000
+#define MCDE_TVISLA_FSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVISLA, FSL2, __x)
+#define MCDE_TVISLB 0x00000A40
+#define MCDE_TVISLB_FSL1_SHIFT 0
+#define MCDE_TVISLB_FSL1_MASK 0x000007FF
+#define MCDE_TVISLB_FSL1(__x) \
+ MCDE_VAL2REG(MCDE_TVISLB, FSL1, __x)
+#define MCDE_TVISLB_FSL2_SHIFT 16
+#define MCDE_TVISLB_FSL2_MASK 0x07FF0000
+#define MCDE_TVISLB_FSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVISLB, FSL2, __x)
+#define MCDE_TVDVOA 0x00000844
+#define MCDE_TVDVOA_GROUPOFFSET 0x200
+#define MCDE_TVDVOA_DVO1_SHIFT 0
+#define MCDE_TVDVOA_DVO1_MASK 0x000007FF
+#define MCDE_TVDVOA_DVO1(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOA, DVO1, __x)
+#define MCDE_TVDVOA_DVO2_SHIFT 16
+#define MCDE_TVDVOA_DVO2_MASK 0x07FF0000
+#define MCDE_TVDVOA_DVO2(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOA, DVO2, __x)
+#define MCDE_TVDVOB 0x00000A44
+#define MCDE_TVDVOB_DVO1_SHIFT 0
+#define MCDE_TVDVOB_DVO1_MASK 0x000007FF
+#define MCDE_TVDVOB_DVO1(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOB, DVO1, __x)
+#define MCDE_TVDVOB_DVO2_SHIFT 16
+#define MCDE_TVDVOB_DVO2_MASK 0x07FF0000
+#define MCDE_TVDVOB_DVO2(__x) \
+ MCDE_VAL2REG(MCDE_TVDVOB, DVO2, __x)
+#define MCDE_TVTIM1A 0x0000084C
+#define MCDE_TVTIM1A_GROUPOFFSET 0x200
+#define MCDE_TVTIM1A_DHO_SHIFT 0
+#define MCDE_TVTIM1A_DHO_MASK 0x000007FF
+#define MCDE_TVTIM1A_DHO(__x) \
+ MCDE_VAL2REG(MCDE_TVTIM1A, DHO, __x)
+#define MCDE_TVTIM1B 0x00000A4C
+#define MCDE_TVTIM1B_DHO_SHIFT 0
+#define MCDE_TVTIM1B_DHO_MASK 0x000007FF
+#define MCDE_TVTIM1B_DHO(__x) \
+ MCDE_VAL2REG(MCDE_TVTIM1B, DHO, __x)
+#define MCDE_TVLBALWA 0x00000850
+#define MCDE_TVLBALWA_GROUPOFFSET 0x200
+#define MCDE_TVLBALWA_LBW_SHIFT 0
+#define MCDE_TVLBALWA_LBW_MASK 0x000007FF
+#define MCDE_TVLBALWA_LBW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWA, LBW, __x)
+#define MCDE_TVLBALWA_ALW_SHIFT 16
+#define MCDE_TVLBALWA_ALW_MASK 0x07FF0000
+#define MCDE_TVLBALWA_ALW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWA, ALW, __x)
+#define MCDE_TVLBALWB 0x00000A50
+#define MCDE_TVLBALWB_LBW_SHIFT 0
+#define MCDE_TVLBALWB_LBW_MASK 0x000007FF
+#define MCDE_TVLBALWB_LBW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWB, LBW, __x)
+#define MCDE_TVLBALWB_ALW_SHIFT 16
+#define MCDE_TVLBALWB_ALW_MASK 0x07FF0000
+#define MCDE_TVLBALWB_ALW(__x) \
+ MCDE_VAL2REG(MCDE_TVLBALWB, ALW, __x)
+#define MCDE_TVBL2A 0x00000854
+#define MCDE_TVBL2A_GROUPOFFSET 0x200
+#define MCDE_TVBL2A_BEL2_SHIFT 0
+#define MCDE_TVBL2A_BEL2_MASK 0x000007FF
+#define MCDE_TVBL2A_BEL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2A, BEL2, __x)
+#define MCDE_TVBL2A_BSL2_SHIFT 16
+#define MCDE_TVBL2A_BSL2_MASK 0x07FF0000
+#define MCDE_TVBL2A_BSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2A, BSL2, __x)
+#define MCDE_TVBL2B 0x00000A54
+#define MCDE_TVBL2B_BEL2_SHIFT 0
+#define MCDE_TVBL2B_BEL2_MASK 0x000007FF
+#define MCDE_TVBL2B_BEL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2B, BEL2, __x)
+#define MCDE_TVBL2B_BSL2_SHIFT 16
+#define MCDE_TVBL2B_BSL2_MASK 0x07FF0000
+#define MCDE_TVBL2B_BSL2(__x) \
+ MCDE_VAL2REG(MCDE_TVBL2B, BSL2, __x)
+#define MCDE_TVBLUA 0x00000858
+#define MCDE_TVBLUA_GROUPOFFSET 0x200
+#define MCDE_TVBLUA_TVBLU_SHIFT 0
+#define MCDE_TVBLUA_TVBLU_MASK 0x000000FF
+#define MCDE_TVBLUA_TVBLU(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUA, TVBLU, __x)
+#define MCDE_TVBLUA_TVBCB_SHIFT 8
+#define MCDE_TVBLUA_TVBCB_MASK 0x0000FF00
+#define MCDE_TVBLUA_TVBCB(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUA, TVBCB, __x)
+#define MCDE_TVBLUA_TVBCR_SHIFT 16
+#define MCDE_TVBLUA_TVBCR_MASK 0x00FF0000
+#define MCDE_TVBLUA_TVBCR(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUA, TVBCR, __x)
+#define MCDE_TVBLUB 0x00000A58
+#define MCDE_TVBLUB_TVBLU_SHIFT 0
+#define MCDE_TVBLUB_TVBLU_MASK 0x000000FF
+#define MCDE_TVBLUB_TVBLU(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUB, TVBLU, __x)
+#define MCDE_TVBLUB_TVBCB_SHIFT 8
+#define MCDE_TVBLUB_TVBCB_MASK 0x0000FF00
+#define MCDE_TVBLUB_TVBCB(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUB, TVBCB, __x)
+#define MCDE_TVBLUB_TVBCR_SHIFT 16
+#define MCDE_TVBLUB_TVBCR_MASK 0x00FF0000
+#define MCDE_TVBLUB_TVBCR(__x) \
+ MCDE_VAL2REG(MCDE_TVBLUB, TVBCR, __x)
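+/*
+ * LCDTIM1x: LCD interface polarity; IVP/IVS/IHS/IPC/IOE presumably invert
+ * the video polarity, vertical sync, horizontal sync, panel clock and
+ * output enable respectively.
+ */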
+#define MCDE_LCDTIM1A 0x00000860
+#define MCDE_LCDTIM1A_GROUPOFFSET 0x200
+#define MCDE_LCDTIM1A_IVP_SHIFT 19
+#define MCDE_LCDTIM1A_IVP_MASK 0x00080000
+#define MCDE_LCDTIM1A_IVP(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IVP, __x)
+#define MCDE_LCDTIM1A_IVS_SHIFT 20
+#define MCDE_LCDTIM1A_IVS_MASK 0x00100000
+#define MCDE_LCDTIM1A_IVS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IVS, __x)
+#define MCDE_LCDTIM1A_IHS_SHIFT 21
+#define MCDE_LCDTIM1A_IHS_MASK 0x00200000
+#define MCDE_LCDTIM1A_IHS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IHS, __x)
+#define MCDE_LCDTIM1A_IPC_SHIFT 22
+#define MCDE_LCDTIM1A_IPC_MASK 0x00400000
+#define MCDE_LCDTIM1A_IPC(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IPC, __x)
+#define MCDE_LCDTIM1A_IOE_SHIFT 23
+#define MCDE_LCDTIM1A_IOE_MASK 0x00800000
+#define MCDE_LCDTIM1A_IOE(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1A, IOE, __x)
+#define MCDE_LCDTIM1B 0x00000A60
+#define MCDE_LCDTIM1B_IVP_SHIFT 19
+#define MCDE_LCDTIM1B_IVP_MASK 0x00080000
+#define MCDE_LCDTIM1B_IVP(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IVP, __x)
+#define MCDE_LCDTIM1B_IVS_SHIFT 20
+#define MCDE_LCDTIM1B_IVS_MASK 0x00100000
+#define MCDE_LCDTIM1B_IVS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IVS, __x)
+#define MCDE_LCDTIM1B_IHS_SHIFT 21
+#define MCDE_LCDTIM1B_IHS_MASK 0x00200000
+#define MCDE_LCDTIM1B_IHS(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IHS, __x)
+#define MCDE_LCDTIM1B_IPC_SHIFT 22
+#define MCDE_LCDTIM1B_IPC_MASK 0x00400000
+#define MCDE_LCDTIM1B_IPC(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IPC, __x)
+#define MCDE_LCDTIM1B_IOE_SHIFT 23
+#define MCDE_LCDTIM1B_IOE_MASK 0x00800000
+#define MCDE_LCDTIM1B_IOE(__x) \
+ MCDE_VAL2REG(MCDE_LCDTIM1B, IOE, __x)
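+/*
+ * DITCTRLx and DITOFFx: dithering control (TEMP presumably selects
+ * temporal dithering) plus frame and per-component dither offsets.
+ */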
+#define MCDE_DITCTRLA 0x00000864
+#define MCDE_DITCTRLA_GROUPOFFSET 0x200
+#define MCDE_DITCTRLA_TEMP_SHIFT 0
+#define MCDE_DITCTRLA_TEMP_MASK 0x00000001
+#define MCDE_DITCTRLA_TEMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, TEMP, __x)
+#define MCDE_DITCTRLA_COMP_SHIFT 1
+#define MCDE_DITCTRLA_COMP_MASK 0x00000002
+#define MCDE_DITCTRLA_COMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, COMP, __x)
+#define MCDE_DITCTRLA_MODE_SHIFT 2
+#define MCDE_DITCTRLA_MODE_MASK 0x0000000C
+#define MCDE_DITCTRLA_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, MODE, __x)
+#define MCDE_DITCTRLA_MASK_SHIFT 4
+#define MCDE_DITCTRLA_MASK_MASK 0x00000010
+#define MCDE_DITCTRLA_MASK(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, MASK, __x)
+#define MCDE_DITCTRLA_FOFFX_SHIFT 5
+#define MCDE_DITCTRLA_FOFFX_MASK 0x000003E0
+#define MCDE_DITCTRLA_FOFFX(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, FOFFX, __x)
+#define MCDE_DITCTRLA_FOFFY_SHIFT 10
+#define MCDE_DITCTRLA_FOFFY_MASK 0x00007C00
+#define MCDE_DITCTRLA_FOFFY(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLA, FOFFY, __x)
+#define MCDE_DITCTRLB 0x00000A64
+#define MCDE_DITCTRLB_TEMP_SHIFT 0
+#define MCDE_DITCTRLB_TEMP_MASK 0x00000001
+#define MCDE_DITCTRLB_TEMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, TEMP, __x)
+#define MCDE_DITCTRLB_COMP_SHIFT 1
+#define MCDE_DITCTRLB_COMP_MASK 0x00000002
+#define MCDE_DITCTRLB_COMP(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, COMP, __x)
+#define MCDE_DITCTRLB_MODE_SHIFT 2
+#define MCDE_DITCTRLB_MODE_MASK 0x0000000C
+#define MCDE_DITCTRLB_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, MODE, __x)
+#define MCDE_DITCTRLB_MASK_SHIFT 4
+#define MCDE_DITCTRLB_MASK_MASK 0x00000010
+#define MCDE_DITCTRLB_MASK(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, MASK, __x)
+#define MCDE_DITCTRLB_FOFFX_SHIFT 5
+#define MCDE_DITCTRLB_FOFFX_MASK 0x000003E0
+#define MCDE_DITCTRLB_FOFFX(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, FOFFX, __x)
+#define MCDE_DITCTRLB_FOFFY_SHIFT 10
+#define MCDE_DITCTRLB_FOFFY_MASK 0x00007C00
+#define MCDE_DITCTRLB_FOFFY(__x) \
+ MCDE_VAL2REG(MCDE_DITCTRLB, FOFFY, __x)
+#define MCDE_DITOFFA 0x00000868
+#define MCDE_DITOFFA_GROUPOFFSET 0x200
+#define MCDE_DITOFFA_XG_SHIFT 0
+#define MCDE_DITOFFA_XG_MASK 0x0000001F
+#define MCDE_DITOFFA_XG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, XG, __x)
+#define MCDE_DITOFFA_YG_SHIFT 8
+#define MCDE_DITOFFA_YG_MASK 0x00001F00
+#define MCDE_DITOFFA_YG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, YG, __x)
+#define MCDE_DITOFFA_XB_SHIFT 16
+#define MCDE_DITOFFA_XB_MASK 0x001F0000
+#define MCDE_DITOFFA_XB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, XB, __x)
+#define MCDE_DITOFFA_YB_SHIFT 24
+#define MCDE_DITOFFA_YB_MASK 0x1F000000
+#define MCDE_DITOFFA_YB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFA, YB, __x)
+#define MCDE_DITOFFB 0x00000A68
+#define MCDE_DITOFFB_XG_SHIFT 0
+#define MCDE_DITOFFB_XG_MASK 0x0000001F
+#define MCDE_DITOFFB_XG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, XG, __x)
+#define MCDE_DITOFFB_YG_SHIFT 8
+#define MCDE_DITOFFB_YG_MASK 0x00001F00
+#define MCDE_DITOFFB_YG(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, YG, __x)
+#define MCDE_DITOFFB_XB_SHIFT 16
+#define MCDE_DITOFFB_XB_MASK 0x001F0000
+#define MCDE_DITOFFB_XB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, XB, __x)
+#define MCDE_DITOFFB_YB_SHIFT 24
+#define MCDE_DITOFFB_YB_MASK 0x1F000000
+#define MCDE_DITOFFB_YB(__x) \
+ MCDE_VAL2REG(MCDE_DITOFFB, YB, __x)
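+/* PALnx: palette data, 12-bit blue/green in PAL0 and 12-bit red in PAL1 */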
+#define MCDE_PAL0A 0x0000086C
+#define MCDE_PAL0A_GROUPOFFSET 0x200
+#define MCDE_PAL0A_BLUE_SHIFT 0
+#define MCDE_PAL0A_BLUE_MASK 0x00000FFF
+#define MCDE_PAL0A_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_PAL0A, BLUE, __x)
+#define MCDE_PAL0A_GREEN_SHIFT 16
+#define MCDE_PAL0A_GREEN_MASK 0x0FFF0000
+#define MCDE_PAL0A_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_PAL0A, GREEN, __x)
+#define MCDE_PAL0B 0x00000A6C
+#define MCDE_PAL0B_BLUE_SHIFT 0
+#define MCDE_PAL0B_BLUE_MASK 0x00000FFF
+#define MCDE_PAL0B_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_PAL0B, BLUE, __x)
+#define MCDE_PAL0B_GREEN_SHIFT 16
+#define MCDE_PAL0B_GREEN_MASK 0x0FFF0000
+#define MCDE_PAL0B_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_PAL0B, GREEN, __x)
+#define MCDE_PAL1A 0x00000870
+#define MCDE_PAL1A_GROUPOFFSET 0x200
+#define MCDE_PAL1A_RED_SHIFT 0
+#define MCDE_PAL1A_RED_MASK 0x00000FFF
+#define MCDE_PAL1A_RED(__x) \
+ MCDE_VAL2REG(MCDE_PAL1A, RED, __x)
+#define MCDE_PAL1B 0x00000A70
+#define MCDE_PAL1B_RED_SHIFT 0
+#define MCDE_PAL1B_RED_MASK 0x00000FFF
+#define MCDE_PAL1B_RED(__x) \
+ MCDE_VAL2REG(MCDE_PAL1B, RED, __x)
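+/*
+ * ROTADDnx and ROTxCONF: rotation buffer addresses (8-byte aligned,
+ * hence bits [31:3]) and rotation setup: burst size, direction (CW/CCW),
+ * outstanding read/write requests, strip width and single-buffer mode.
+ */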
+#define MCDE_ROTADD0A 0x00000874
+#define MCDE_ROTADD0A_GROUPOFFSET 0x200
+#define MCDE_ROTADD0A_ROTADD0_SHIFT 3
+#define MCDE_ROTADD0A_ROTADD0_MASK 0xFFFFFFF8
+#define MCDE_ROTADD0A_ROTADD0(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD0A, ROTADD0, __x)
+#define MCDE_ROTADD0B 0x00000A74
+#define MCDE_ROTADD0B_ROTADD0_SHIFT 3
+#define MCDE_ROTADD0B_ROTADD0_MASK 0xFFFFFFF8
+#define MCDE_ROTADD0B_ROTADD0(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD0B, ROTADD0, __x)
+#define MCDE_ROTADD1A 0x00000878
+#define MCDE_ROTADD1A_GROUPOFFSET 0x200
+#define MCDE_ROTADD1A_ROTADD1_SHIFT 3
+#define MCDE_ROTADD1A_ROTADD1_MASK 0xFFFFFFF8
+#define MCDE_ROTADD1A_ROTADD1(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD1A, ROTADD1, __x)
+#define MCDE_ROTADD1B 0x00000A78
+#define MCDE_ROTADD1B_ROTADD1_SHIFT 3
+#define MCDE_ROTADD1B_ROTADD1_MASK 0xFFFFFFF8
+#define MCDE_ROTADD1B_ROTADD1(__x) \
+ MCDE_VAL2REG(MCDE_ROTADD1B, ROTADD1, __x)
+#define MCDE_ROTACONF 0x0000087C
+#define MCDE_ROTACONF_GROUPOFFSET 0x200
+#define MCDE_ROTACONF_ROTBURSTSIZE_SHIFT 0
+#define MCDE_ROTACONF_ROTBURSTSIZE_MASK 0x00000007
+#define MCDE_ROTACONF_ROTBURSTSIZE_1W 0
+#define MCDE_ROTACONF_ROTBURSTSIZE_2W 1
+#define MCDE_ROTACONF_ROTBURSTSIZE_4W 2
+#define MCDE_ROTACONF_ROTBURSTSIZE_8W 3
+#define MCDE_ROTACONF_ROTBURSTSIZE_HW_1W 4
+#define MCDE_ROTACONF_ROTBURSTSIZE_HW_2W 5
+#define MCDE_ROTACONF_ROTBURSTSIZE_HW_4W 6
+#define MCDE_ROTACONF_ROTBURSTSIZE_HW_8W 7
+#define MCDE_ROTACONF_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTBURSTSIZE, \
+ MCDE_ROTACONF_ROTBURSTSIZE_##__x)
+#define MCDE_ROTACONF_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTBURSTSIZE, __x)
+#define MCDE_ROTACONF_ROTDIR_SHIFT 3
+#define MCDE_ROTACONF_ROTDIR_MASK 0x00000008
+#define MCDE_ROTACONF_ROTDIR_CCW 0
+#define MCDE_ROTACONF_ROTDIR_CW 1
+#define MCDE_ROTACONF_ROTDIR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTDIR, MCDE_ROTACONF_ROTDIR_##__x)
+#define MCDE_ROTACONF_ROTDIR(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, ROTDIR, __x)
+#define MCDE_ROTACONF_WR_MAXOUT_SHIFT 4
+#define MCDE_ROTACONF_WR_MAXOUT_MASK 0x00000030
+#define MCDE_ROTACONF_WR_MAXOUT_1_REQ 0
+#define MCDE_ROTACONF_WR_MAXOUT_2_REQ 1
+#define MCDE_ROTACONF_WR_MAXOUT_4_REQ 2
+#define MCDE_ROTACONF_WR_MAXOUT_8_REQ 3
+#define MCDE_ROTACONF_WR_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, WR_MAXOUT, MCDE_ROTACONF_WR_MAXOUT_##__x)
+#define MCDE_ROTACONF_WR_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, WR_MAXOUT, __x)
+#define MCDE_ROTACONF_RD_MAXOUT_SHIFT 6
+#define MCDE_ROTACONF_RD_MAXOUT_MASK 0x000000C0
+#define MCDE_ROTACONF_RD_MAXOUT_1_REQ 0
+#define MCDE_ROTACONF_RD_MAXOUT_2_REQ 1
+#define MCDE_ROTACONF_RD_MAXOUT_4_REQ 2
+#define MCDE_ROTACONF_RD_MAXOUT_8_REQ 3
+#define MCDE_ROTACONF_RD_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, RD_MAXOUT, MCDE_ROTACONF_RD_MAXOUT_##__x)
+#define MCDE_ROTACONF_RD_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, RD_MAXOUT, __x)
+#define MCDE_ROTACONF_STRIP_WIDTH_SHIFT 8
+#define MCDE_ROTACONF_STRIP_WIDTH_MASK 0x00007F00
+#define MCDE_ROTACONF_STRIP_WIDTH_2PIX 0
+#define MCDE_ROTACONF_STRIP_WIDTH_4PIX 1
+#define MCDE_ROTACONF_STRIP_WIDTH_8PIX 2
+#define MCDE_ROTACONF_STRIP_WIDTH_16PIX 3
+#define MCDE_ROTACONF_STRIP_WIDTH_32PIX 4
+#define MCDE_ROTACONF_STRIP_WIDTH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, STRIP_WIDTH, \
+ MCDE_ROTACONF_STRIP_WIDTH_##__x)
+#define MCDE_ROTACONF_STRIP_WIDTH(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, STRIP_WIDTH, __x)
+#define MCDE_ROTACONF_SINGLE_BUF_SHIFT 15
+#define MCDE_ROTACONF_SINGLE_BUF_MASK 0x00008000
+#define MCDE_ROTACONF_SINGLE_BUF(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, SINGLE_BUF, __x)
+#define MCDE_ROTACONF_WR_ROPC_SHIFT 16
+#define MCDE_ROTACONF_WR_ROPC_MASK 0x00FF0000
+#define MCDE_ROTACONF_WR_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, WR_ROPC, __x)
+#define MCDE_ROTACONF_RD_ROPC_SHIFT 24
+#define MCDE_ROTACONF_RD_ROPC_MASK 0xFF000000
+#define MCDE_ROTACONF_RD_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTACONF, RD_ROPC, __x)
+#define MCDE_ROTBCONF 0x00000A7C
+#define MCDE_ROTBCONF_ROTBURSTSIZE_SHIFT 0
+#define MCDE_ROTBCONF_ROTBURSTSIZE_MASK 0x00000007
+#define MCDE_ROTBCONF_ROTBURSTSIZE_1W 0
+#define MCDE_ROTBCONF_ROTBURSTSIZE_2W 1
+#define MCDE_ROTBCONF_ROTBURSTSIZE_4W 2
+#define MCDE_ROTBCONF_ROTBURSTSIZE_8W 3
+#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_1W 4
+#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_2W 5
+#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_4W 6
+#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_8W 7
+#define MCDE_ROTBCONF_ROTBURSTSIZE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTBURSTSIZE, \
+ MCDE_ROTBCONF_ROTBURSTSIZE_##__x)
+#define MCDE_ROTBCONF_ROTBURSTSIZE(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTBURSTSIZE, __x)
+#define MCDE_ROTBCONF_ROTDIR_SHIFT 3
+#define MCDE_ROTBCONF_ROTDIR_MASK 0x00000008
+#define MCDE_ROTBCONF_ROTDIR_CCW 0
+#define MCDE_ROTBCONF_ROTDIR_CW 1
+#define MCDE_ROTBCONF_ROTDIR_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTDIR, MCDE_ROTBCONF_ROTDIR_##__x)
+#define MCDE_ROTBCONF_ROTDIR(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, ROTDIR, __x)
+#define MCDE_ROTBCONF_WR_MAXOUT_SHIFT 4
+#define MCDE_ROTBCONF_WR_MAXOUT_MASK 0x00000030
+#define MCDE_ROTBCONF_WR_MAXOUT_1_REQ 0
+#define MCDE_ROTBCONF_WR_MAXOUT_2_REQ 1
+#define MCDE_ROTBCONF_WR_MAXOUT_4_REQ 2
+#define MCDE_ROTBCONF_WR_MAXOUT_8_REQ 3
+#define MCDE_ROTBCONF_WR_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, WR_MAXOUT, MCDE_ROTBCONF_WR_MAXOUT_##__x)
+#define MCDE_ROTBCONF_WR_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, WR_MAXOUT, __x)
+#define MCDE_ROTBCONF_RD_MAXOUT_SHIFT 6
+#define MCDE_ROTBCONF_RD_MAXOUT_MASK 0x000000C0
+#define MCDE_ROTBCONF_RD_MAXOUT_1_REQ 0
+#define MCDE_ROTBCONF_RD_MAXOUT_2_REQ 1
+#define MCDE_ROTBCONF_RD_MAXOUT_4_REQ 2
+#define MCDE_ROTBCONF_RD_MAXOUT_8_REQ 3
+#define MCDE_ROTBCONF_RD_MAXOUT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, RD_MAXOUT, MCDE_ROTBCONF_RD_MAXOUT_##__x)
+#define MCDE_ROTBCONF_RD_MAXOUT(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, RD_MAXOUT, __x)
+#define MCDE_ROTBCONF_STRIP_WIDTH_SHIFT 8
+#define MCDE_ROTBCONF_STRIP_WIDTH_MASK 0x00007F00
+#define MCDE_ROTBCONF_STRIP_WIDTH_2PIX 0
+#define MCDE_ROTBCONF_STRIP_WIDTH_4PIX 1
+#define MCDE_ROTBCONF_STRIP_WIDTH_8PIX 2
+#define MCDE_ROTBCONF_STRIP_WIDTH_16PIX 3
+#define MCDE_ROTBCONF_STRIP_WIDTH_32PIX 4
+#define MCDE_ROTBCONF_STRIP_WIDTH_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, STRIP_WIDTH, \
+ MCDE_ROTBCONF_STRIP_WIDTH_##__x)
+#define MCDE_ROTBCONF_STRIP_WIDTH(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, STRIP_WIDTH, __x)
+#define MCDE_ROTBCONF_SINGLE_BUF_SHIFT 15
+#define MCDE_ROTBCONF_SINGLE_BUF_MASK 0x00008000
+#define MCDE_ROTBCONF_SINGLE_BUF(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, SINGLE_BUF, __x)
+#define MCDE_ROTBCONF_WR_ROPC_SHIFT 16
+#define MCDE_ROTBCONF_WR_ROPC_MASK 0x00FF0000
+#define MCDE_ROTBCONF_WR_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, WR_ROPC, __x)
+#define MCDE_ROTBCONF_RD_ROPC_SHIFT 24
+#define MCDE_ROTBCONF_RD_ROPC_MASK 0xFF000000
+#define MCDE_ROTBCONF_RD_ROPC(__x) \
+ MCDE_VAL2REG(MCDE_ROTBCONF, RD_ROPC, __x)
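+/*
+ * SYNCHCONFx: selects which video event (vsync, porches, active video)
+ * and line count raise the hardware request (HWREQV*) and the software
+ * interrupt (SWINTV*).
+ */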
+#define MCDE_SYNCHCONFA 0x00000880
+#define MCDE_SYNCHCONFA_GROUPOFFSET 0x200
+#define MCDE_SYNCHCONFA_HWREQVEVENT_SHIFT 0
+#define MCDE_SYNCHCONFA_HWREQVEVENT_MASK 0x00000003
+#define MCDE_SYNCHCONFA_HWREQVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFA_HWREQVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFA_HWREQVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFA_HWREQVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFA_HWREQVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVEVENT, \
+ MCDE_SYNCHCONFA_HWREQVEVENT_##__x)
+#define MCDE_SYNCHCONFA_HWREQVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVEVENT, __x)
+#define MCDE_SYNCHCONFA_HWREQVCNT_SHIFT 2
+#define MCDE_SYNCHCONFA_HWREQVCNT_MASK 0x0000FFFC
+#define MCDE_SYNCHCONFA_HWREQVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVCNT, __x)
+#define MCDE_SYNCHCONFA_SWINTVEVENT_SHIFT 16
+#define MCDE_SYNCHCONFA_SWINTVEVENT_MASK 0x00030000
+#define MCDE_SYNCHCONFA_SWINTVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFA_SWINTVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFA_SWINTVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFA_SWINTVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFA_SWINTVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVEVENT, \
+ MCDE_SYNCHCONFA_SWINTVEVENT_##__x)
+#define MCDE_SYNCHCONFA_SWINTVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVEVENT, __x)
+#define MCDE_SYNCHCONFA_SWINTVCNT_SHIFT 18
+#define MCDE_SYNCHCONFA_SWINTVCNT_MASK 0xFFFC0000
+#define MCDE_SYNCHCONFA_SWINTVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVCNT, __x)
+#define MCDE_SYNCHCONFB 0x00000A80
+#define MCDE_SYNCHCONFB_HWREQVEVENT_SHIFT 0
+#define MCDE_SYNCHCONFB_HWREQVEVENT_MASK 0x00000003
+#define MCDE_SYNCHCONFB_HWREQVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFB_HWREQVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFB_HWREQVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFB_HWREQVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFB_HWREQVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVEVENT, \
+ MCDE_SYNCHCONFB_HWREQVEVENT_##__x)
+#define MCDE_SYNCHCONFB_HWREQVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVEVENT, __x)
+#define MCDE_SYNCHCONFB_HWREQVCNT_SHIFT 2
+#define MCDE_SYNCHCONFB_HWREQVCNT_MASK 0x0000FFFC
+#define MCDE_SYNCHCONFB_HWREQVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVCNT, __x)
+#define MCDE_SYNCHCONFB_SWINTVEVENT_SHIFT 16
+#define MCDE_SYNCHCONFB_SWINTVEVENT_MASK 0x00030000
+#define MCDE_SYNCHCONFB_SWINTVEVENT_VSYNC 0
+#define MCDE_SYNCHCONFB_SWINTVEVENT_BACK_PORCH 1
+#define MCDE_SYNCHCONFB_SWINTVEVENT_ACTIVE_VIDEO 2
+#define MCDE_SYNCHCONFB_SWINTVEVENT_FRONT_PORCH 3
+#define MCDE_SYNCHCONFB_SWINTVEVENT_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVEVENT, \
+ MCDE_SYNCHCONFB_SWINTVEVENT_##__x)
+#define MCDE_SYNCHCONFB_SWINTVEVENT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVEVENT, __x)
+#define MCDE_SYNCHCONFB_SWINTVCNT_SHIFT 18
+#define MCDE_SYNCHCONFB_SWINTVCNT_MASK 0xFFFC0000
+#define MCDE_SYNCHCONFB_SWINTVCNT(__x) \
+ MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVCNT, __x)
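+/*
+ * CTRLx: output FIFO watermark and empty/full status plus formatter
+ * routing. Note the overlapping FORMID values: DSInVID/DSInCMD apply
+ * when FORMTYPE is DSI, DPIA/DPIB when it is DPITV.
+ */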
+#define MCDE_CTRLA 0x00000884
+#define MCDE_CTRLA_GROUPOFFSET 0x200
+#define MCDE_CTRLA_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLA_FIFOWTRMRK_MASK 0x000003FF
+#define MCDE_CTRLA_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FIFOWTRMRK, __x)
+#define MCDE_CTRLA_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLA_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLA_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FIFOEMPTY, __x)
+#define MCDE_CTRLA_FIFOFULL_SHIFT 13
+#define MCDE_CTRLA_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLA_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FIFOFULL, __x)
+#define MCDE_CTRLA_FORMID_SHIFT 16
+#define MCDE_CTRLA_FORMID_MASK 0x00070000
+#define MCDE_CTRLA_FORMID_DSI0VID 0
+#define MCDE_CTRLA_FORMID_DSI0CMD 1
+#define MCDE_CTRLA_FORMID_DSI1VID 2
+#define MCDE_CTRLA_FORMID_DSI1CMD 3
+#define MCDE_CTRLA_FORMID_DSI2VID 4
+#define MCDE_CTRLA_FORMID_DSI2CMD 5
+#define MCDE_CTRLA_FORMID_DPIA 0
+#define MCDE_CTRLA_FORMID_DPIB 1
+#define MCDE_CTRLA_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMID, MCDE_CTRLA_FORMID_##__x)
+#define MCDE_CTRLA_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMID, __x)
+#define MCDE_CTRLA_FORMTYPE_SHIFT 20
+#define MCDE_CTRLA_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLA_FORMTYPE_DPITV 0
+#define MCDE_CTRLA_FORMTYPE_DBI 1
+#define MCDE_CTRLA_FORMTYPE_DSI 2
+#define MCDE_CTRLA_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMTYPE, MCDE_CTRLA_FORMTYPE_##__x)
+#define MCDE_CTRLA_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLA, FORMTYPE, __x)
+#define MCDE_CTRLB 0x00000A84
+#define MCDE_CTRLB_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLB_FIFOWTRMRK_MASK 0x000003FF
+#define MCDE_CTRLB_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FIFOWTRMRK, __x)
+#define MCDE_CTRLB_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLB_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLB_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FIFOEMPTY, __x)
+#define MCDE_CTRLB_FIFOFULL_SHIFT 13
+#define MCDE_CTRLB_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLB_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FIFOFULL, __x)
+#define MCDE_CTRLB_FORMID_SHIFT 16
+#define MCDE_CTRLB_FORMID_MASK 0x00070000
+#define MCDE_CTRLB_FORMID_DSI0VID 0
+#define MCDE_CTRLB_FORMID_DSI0CMD 1
+#define MCDE_CTRLB_FORMID_DSI1VID 2
+#define MCDE_CTRLB_FORMID_DSI1CMD 3
+#define MCDE_CTRLB_FORMID_DSI2VID 4
+#define MCDE_CTRLB_FORMID_DSI2CMD 5
+#define MCDE_CTRLB_FORMID_DPIA 0
+#define MCDE_CTRLB_FORMID_DPIB 1
+#define MCDE_CTRLB_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMID, MCDE_CTRLB_FORMID_##__x)
+#define MCDE_CTRLB_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMID, __x)
+#define MCDE_CTRLB_FORMTYPE_SHIFT 20
+#define MCDE_CTRLB_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLB_FORMTYPE_DPITV 0
+#define MCDE_CTRLB_FORMTYPE_DBI 1
+#define MCDE_CTRLB_FORMTYPE_DSI 2
+#define MCDE_CTRLB_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMTYPE, MCDE_CTRLB_FORMTYPE_##__x)
+#define MCDE_CTRLB_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLB, FORMTYPE, __x)
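+/* GAMnx: gamma table data, one 24-bit register per color component */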
+#define MCDE_GAM0A 0x00000888
+#define MCDE_GAM0A_GROUPOFFSET 0x200
+#define MCDE_GAM0A_BLUE_SHIFT 0
+#define MCDE_GAM0A_BLUE_MASK 0x00FFFFFF
+#define MCDE_GAM0A_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_GAM0A, BLUE, __x)
+#define MCDE_GAM0B 0x00000A88
+#define MCDE_GAM0B_BLUE_SHIFT 0
+#define MCDE_GAM0B_BLUE_MASK 0x00FFFFFF
+#define MCDE_GAM0B_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_GAM0B, BLUE, __x)
+#define MCDE_GAM1A 0x0000088C
+#define MCDE_GAM1A_GROUPOFFSET 0x200
+#define MCDE_GAM1A_GREEN_SHIFT 0
+#define MCDE_GAM1A_GREEN_MASK 0x00FFFFFF
+#define MCDE_GAM1A_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_GAM1A, GREEN, __x)
+#define MCDE_GAM1B 0x00000A8C
+#define MCDE_GAM1B_GREEN_SHIFT 0
+#define MCDE_GAM1B_GREEN_MASK 0x00FFFFFF
+#define MCDE_GAM1B_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_GAM1B, GREEN, __x)
+#define MCDE_GAM2A 0x00000890
+#define MCDE_GAM2A_GROUPOFFSET 0x200
+#define MCDE_GAM2A_RED_SHIFT 0
+#define MCDE_GAM2A_RED_MASK 0x00FFFFFF
+#define MCDE_GAM2A_RED(__x) \
+ MCDE_VAL2REG(MCDE_GAM2A, RED, __x)
+#define MCDE_GAM2B 0x00000A90
+#define MCDE_GAM2B_RED_SHIFT 0
+#define MCDE_GAM2B_RED_MASK 0x00FFFFFF
+#define MCDE_GAM2B_RED(__x) \
+ MCDE_VAL2REG(MCDE_GAM2B, RED, __x)
+#define MCDE_OLEDCONV1A 0x00000894
+#define MCDE_OLEDCONV1A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV1A_ALPHA_RED_SHIFT 0
+#define MCDE_OLEDCONV1A_ALPHA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV1A_ALPHA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1A, ALPHA_RED, __x)
+#define MCDE_OLEDCONV1A_ALPHA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV1A_ALPHA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV1A_ALPHA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1A, ALPHA_GREEN, __x)
+#define MCDE_OLEDCONV1B 0x00000A94
+#define MCDE_OLEDCONV1B_ALPHA_RED_SHIFT 0
+#define MCDE_OLEDCONV1B_ALPHA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV1B_ALPHA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1B, ALPHA_RED, __x)
+#define MCDE_OLEDCONV1B_ALPHA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV1B_ALPHA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV1B_ALPHA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV1B, ALPHA_GREEN, __x)
+#define MCDE_OLEDCONV2A 0x00000898
+#define MCDE_OLEDCONV2A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV2A_ALPHA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV2A_ALPHA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV2A_ALPHA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2A, ALPHA_BLUE, __x)
+#define MCDE_OLEDCONV2A_BETA_RED_SHIFT 16
+#define MCDE_OLEDCONV2A_BETA_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV2A_BETA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2A, BETA_RED, __x)
+#define MCDE_OLEDCONV2B 0x00000A98
+#define MCDE_OLEDCONV2B_ALPHA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV2B_ALPHA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV2B_ALPHA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2B, ALPHA_BLUE, __x)
+#define MCDE_OLEDCONV2B_BETA_RED_SHIFT 16
+#define MCDE_OLEDCONV2B_BETA_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV2B_BETA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV2B, BETA_RED, __x)
+#define MCDE_OLEDCONV3A 0x0000089C
+#define MCDE_OLEDCONV3A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV3A_BETA_GREEN_SHIFT 0
+#define MCDE_OLEDCONV3A_BETA_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV3A_BETA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3A, BETA_GREEN, __x)
+#define MCDE_OLEDCONV3A_BETA_BLUE_SHIFT 16
+#define MCDE_OLEDCONV3A_BETA_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV3A_BETA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3A, BETA_BLUE, __x)
+#define MCDE_OLEDCONV3B 0x00000A9C
+#define MCDE_OLEDCONV3B_BETA_GREEN_SHIFT 0
+#define MCDE_OLEDCONV3B_BETA_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV3B_BETA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3B, BETA_GREEN, __x)
+#define MCDE_OLEDCONV3B_BETA_BLUE_SHIFT 16
+#define MCDE_OLEDCONV3B_BETA_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV3B_BETA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV3B, BETA_BLUE, __x)
+#define MCDE_OLEDCONV4A 0x000008A0
+#define MCDE_OLEDCONV4A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV4A_GAMMA_RED_SHIFT 0
+#define MCDE_OLEDCONV4A_GAMMA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV4A_GAMMA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4A, GAMMA_RED, __x)
+#define MCDE_OLEDCONV4A_GAMMA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV4A_GAMMA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV4A_GAMMA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4A, GAMMA_GREEN, __x)
+#define MCDE_OLEDCONV4B 0x00000AA0
+#define MCDE_OLEDCONV4B_GAMMA_RED_SHIFT 0
+#define MCDE_OLEDCONV4B_GAMMA_RED_MASK 0x00003FFF
+#define MCDE_OLEDCONV4B_GAMMA_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4B, GAMMA_RED, __x)
+#define MCDE_OLEDCONV4B_GAMMA_GREEN_SHIFT 16
+#define MCDE_OLEDCONV4B_GAMMA_GREEN_MASK 0x3FFF0000
+#define MCDE_OLEDCONV4B_GAMMA_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV4B, GAMMA_GREEN, __x)
+#define MCDE_OLEDCONV5A 0x000008A4
+#define MCDE_OLEDCONV5A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV5A_GAMMA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV5A_GAMMA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV5A_GAMMA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5A, GAMMA_BLUE, __x)
+#define MCDE_OLEDCONV5A_OFF_RED_SHIFT 16
+#define MCDE_OLEDCONV5A_OFF_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV5A_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5A, OFF_RED, __x)
+#define MCDE_OLEDCONV5B 0x00000AA4
+#define MCDE_OLEDCONV5B_GAMMA_BLUE_SHIFT 0
+#define MCDE_OLEDCONV5B_GAMMA_BLUE_MASK 0x00003FFF
+#define MCDE_OLEDCONV5B_GAMMA_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5B, GAMMA_BLUE, __x)
+#define MCDE_OLEDCONV5B_OFF_RED_SHIFT 16
+#define MCDE_OLEDCONV5B_OFF_RED_MASK 0x3FFF0000
+#define MCDE_OLEDCONV5B_OFF_RED(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV5B, OFF_RED, __x)
+#define MCDE_OLEDCONV6A 0x000008A8
+#define MCDE_OLEDCONV6A_GROUPOFFSET 0x200
+#define MCDE_OLEDCONV6A_OFF_GREEN_SHIFT 0
+#define MCDE_OLEDCONV6A_OFF_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV6A_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6A, OFF_GREEN, __x)
+#define MCDE_OLEDCONV6A_OFF_BLUE_SHIFT 16
+#define MCDE_OLEDCONV6A_OFF_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV6A_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6A, OFF_BLUE, __x)
+#define MCDE_OLEDCONV6B 0x00000AA8
+#define MCDE_OLEDCONV6B_OFF_GREEN_SHIFT 0
+#define MCDE_OLEDCONV6B_OFF_GREEN_MASK 0x00003FFF
+#define MCDE_OLEDCONV6B_OFF_GREEN(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6B, OFF_GREEN, __x)
+#define MCDE_OLEDCONV6B_OFF_BLUE_SHIFT 16
+#define MCDE_OLEDCONV6B_OFF_BLUE_MASK 0x3FFF0000
+#define MCDE_OLEDCONV6B_OFF_BLUE(__x) \
+ MCDE_VAL2REG(MCDE_OLEDCONV6B, OFF_BLUE, __x)
+#define MCDE_CRC 0x00000C00
+#define MCDE_CRC_C1EN_SHIFT 2
+#define MCDE_CRC_C1EN_MASK 0x00000004
+#define MCDE_CRC_C1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, C1EN, __x)
+#define MCDE_CRC_C2EN_SHIFT 3
+#define MCDE_CRC_C2EN_MASK 0x00000008
+#define MCDE_CRC_C2EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, C2EN, __x)
+#define MCDE_CRC_SYCEN0_SHIFT 7
+#define MCDE_CRC_SYCEN0_MASK 0x00000080
+#define MCDE_CRC_SYCEN0(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYCEN0, __x)
+#define MCDE_CRC_SYCEN1_SHIFT 8
+#define MCDE_CRC_SYCEN1_MASK 0x00000100
+#define MCDE_CRC_SYCEN1(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYCEN1, __x)
+#define MCDE_CRC_SIZE1_SHIFT 9
+#define MCDE_CRC_SIZE1_MASK 0x00000200
+#define MCDE_CRC_SIZE1(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SIZE1, __x)
+#define MCDE_CRC_SIZE2_SHIFT 10
+#define MCDE_CRC_SIZE2_MASK 0x00000400
+#define MCDE_CRC_SIZE2(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SIZE2, __x)
+#define MCDE_CRC_YUVCONVC1EN_SHIFT 15
+#define MCDE_CRC_YUVCONVC1EN_MASK 0x00008000
+#define MCDE_CRC_YUVCONVC1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, YUVCONVC1EN, __x)
+#define MCDE_CRC_CS1EN_SHIFT 16
+#define MCDE_CRC_CS1EN_MASK 0x00010000
+#define MCDE_CRC_CS1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS1EN, __x)
+#define MCDE_CRC_CS2EN_SHIFT 17
+#define MCDE_CRC_CS2EN_MASK 0x00020000
+#define MCDE_CRC_CS2EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS2EN, __x)
+#define MCDE_CRC_CS1POL_SHIFT 19
+#define MCDE_CRC_CS1POL_MASK 0x00080000
+#define MCDE_CRC_CS1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS1POL, __x)
+#define MCDE_CRC_CS2POL_SHIFT 20
+#define MCDE_CRC_CS2POL_MASK 0x00100000
+#define MCDE_CRC_CS2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CS2POL, __x)
+#define MCDE_CRC_CD1POL_SHIFT 21
+#define MCDE_CRC_CD1POL_MASK 0x00200000
+#define MCDE_CRC_CD1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CD1POL, __x)
+#define MCDE_CRC_CD2POL_SHIFT 22
+#define MCDE_CRC_CD2POL_MASK 0x00400000
+#define MCDE_CRC_CD2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CD2POL, __x)
+#define MCDE_CRC_WR1POL_SHIFT 23
+#define MCDE_CRC_WR1POL_MASK 0x00800000
+#define MCDE_CRC_WR1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, WR1POL, __x)
+#define MCDE_CRC_WR2POL_SHIFT 24
+#define MCDE_CRC_WR2POL_MASK 0x01000000
+#define MCDE_CRC_WR2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, WR2POL, __x)
+#define MCDE_CRC_RD1POL_SHIFT 25
+#define MCDE_CRC_RD1POL_MASK 0x02000000
+#define MCDE_CRC_RD1POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, RD1POL, __x)
+#define MCDE_CRC_RD2POL_SHIFT 26
+#define MCDE_CRC_RD2POL_MASK 0x04000000
+#define MCDE_CRC_RD2POL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, RD2POL, __x)
+#define MCDE_CRC_SYNCCTRL_SHIFT 29
+#define MCDE_CRC_SYNCCTRL_MASK 0x60000000
+#define MCDE_CRC_SYNCCTRL_NO_SYNC 0
+#define MCDE_CRC_SYNCCTRL_DBI0 1
+#define MCDE_CRC_SYNCCTRL_DBI1 2
+#define MCDE_CRC_SYNCCTRL_PING_PONG 3
+#define MCDE_CRC_SYNCCTRL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYNCCTRL, MCDE_CRC_SYNCCTRL_##__x)
+#define MCDE_CRC_SYNCCTRL(__x) \
+ MCDE_VAL2REG(MCDE_CRC, SYNCCTRL, __x)
+#define MCDE_CRC_CLAMPC1EN_SHIFT 31
+#define MCDE_CRC_CLAMPC1EN_MASK 0x80000000
+#define MCDE_CRC_CLAMPC1EN(__x) \
+ MCDE_VAL2REG(MCDE_CRC, CLAMPC1EN, __x)
+#define MCDE_PBCCRC0 0x00000C04
+#define MCDE_PBCCRC0_GROUPOFFSET 0x4
+#define MCDE_PBCCRC0_BSCM_SHIFT 0
+#define MCDE_PBCCRC0_BSCM_MASK 0x00000007
+#define MCDE_PBCCRC0_BSCM_1_8BIT 0
+#define MCDE_PBCCRC0_BSCM_2_8BIT 1
+#define MCDE_PBCCRC0_BSCM_3_8BIT 2
+#define MCDE_PBCCRC0_BSCM_1_16BIT 3
+#define MCDE_PBCCRC0_BSCM_2_16BIT 4
+#define MCDE_PBCCRC0_BSCM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSCM, MCDE_PBCCRC0_BSCM_##__x)
+#define MCDE_PBCCRC0_BSCM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSCM, __x)
+#define MCDE_PBCCRC0_BSDM_SHIFT 3
+#define MCDE_PBCCRC0_BSDM_MASK 0x00000038
+#define MCDE_PBCCRC0_BSDM_1_8BIT 0
+#define MCDE_PBCCRC0_BSDM_2_8BIT 1
+#define MCDE_PBCCRC0_BSDM_3_8BIT 2
+#define MCDE_PBCCRC0_BSDM_1_16BIT 3
+#define MCDE_PBCCRC0_BSDM_2_16BIT 4
+#define MCDE_PBCCRC0_BSDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSDM, MCDE_PBCCRC0_BSDM_##__x)
+#define MCDE_PBCCRC0_BSDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BSDM, __x)
+#define MCDE_PBCCRC0_PDM_SHIFT 6
+#define MCDE_PBCCRC0_PDM_MASK 0x000000C0
+#define MCDE_PBCCRC0_PDM_NORMAL 0
+#define MCDE_PBCCRC0_PDM_16_TO_32 1
+#define MCDE_PBCCRC0_PDM_24_TO_32_RIGHT 2
+#define MCDE_PBCCRC0_PDM_24_TO_32_LEFT 3
+#define MCDE_PBCCRC0_PDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, PDM, MCDE_PBCCRC0_PDM_##__x)
+#define MCDE_PBCCRC0_PDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, PDM, __x)
+#define MCDE_PBCCRC0_PDCTRL_SHIFT 12
+#define MCDE_PBCCRC0_PDCTRL_MASK 0x00001000
+#define MCDE_PBCCRC0_PDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, PDCTRL, __x)
+#define MCDE_PBCCRC0_BPP_SHIFT 13
+#define MCDE_PBCCRC0_BPP_MASK 0x0000E000
+#define MCDE_PBCCRC0_BPP_8BPP 0
+#define MCDE_PBCCRC0_BPP_12BPP 1
+#define MCDE_PBCCRC0_BPP_15BPP 2
+#define MCDE_PBCCRC0_BPP_16BPP 3
+#define MCDE_PBCCRC0_BPP_18BPP 4
+#define MCDE_PBCCRC0_BPP_24BPP 5
+#define MCDE_PBCCRC0_BPP(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC0, BPP, __x)
+#define MCDE_PBCCRC1 0x00000C08
+#define MCDE_PBCCRC1_BSCM_SHIFT 0
+#define MCDE_PBCCRC1_BSCM_MASK 0x00000007
+#define MCDE_PBCCRC1_BSCM_1_8BIT 0
+#define MCDE_PBCCRC1_BSCM_2_8BIT 1
+#define MCDE_PBCCRC1_BSCM_3_8BIT 2
+#define MCDE_PBCCRC1_BSCM_1_16BIT 3
+#define MCDE_PBCCRC1_BSCM_2_16BIT 4
+#define MCDE_PBCCRC1_BSCM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSCM, MCDE_PBCCRC1_BSCM_##__x)
+#define MCDE_PBCCRC1_BSCM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSCM, __x)
+#define MCDE_PBCCRC1_BSDM_SHIFT 3
+#define MCDE_PBCCRC1_BSDM_MASK 0x00000038
+#define MCDE_PBCCRC1_BSDM_1_8BIT 0
+#define MCDE_PBCCRC1_BSDM_2_8BIT 1
+#define MCDE_PBCCRC1_BSDM_3_8BIT 2
+#define MCDE_PBCCRC1_BSDM_1_16BIT 3
+#define MCDE_PBCCRC1_BSDM_2_16BIT 4
+#define MCDE_PBCCRC1_BSDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSDM, MCDE_PBCCRC1_BSDM_##__x)
+#define MCDE_PBCCRC1_BSDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BSDM, __x)
+#define MCDE_PBCCRC1_PDM_SHIFT 6
+#define MCDE_PBCCRC1_PDM_MASK 0x000000C0
+#define MCDE_PBCCRC1_PDM_NORMAL 0
+#define MCDE_PBCCRC1_PDM_16_TO_32 1
+#define MCDE_PBCCRC1_PDM_24_TO_32_RIGHT 2
+#define MCDE_PBCCRC1_PDM_24_TO_32_LEFT 3
+#define MCDE_PBCCRC1_PDM_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, PDM, MCDE_PBCCRC1_PDM_##__x)
+#define MCDE_PBCCRC1_PDM(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, PDM, __x)
+#define MCDE_PBCCRC1_PDCTRL_SHIFT 12
+#define MCDE_PBCCRC1_PDCTRL_MASK 0x00001000
+#define MCDE_PBCCRC1_PDCTRL(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, PDCTRL, __x)
+#define MCDE_PBCCRC1_BPP_SHIFT 13
+#define MCDE_PBCCRC1_BPP_MASK 0x0000E000
+#define MCDE_PBCCRC1_BPP_8BPP 0
+#define MCDE_PBCCRC1_BPP_12BPP 1
+#define MCDE_PBCCRC1_BPP_15BPP 2
+#define MCDE_PBCCRC1_BPP_16BPP 3
+#define MCDE_PBCCRC1_BPP_18BPP 4
+#define MCDE_PBCCRC1_BPP_24BPP 5
+#define MCDE_PBCCRC1_BPP(__x) \
+ MCDE_VAL2REG(MCDE_PBCCRC1, BPP, __x)
+#define MCDE_PBCBMRC00 0x00000C0C
+#define MCDE_PBCBMRC00_GROUPOFFSET 0x4
+#define MCDE_PBCBMRC00_MUXI_SHIFT 0
+#define MCDE_PBCBMRC00_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC00_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC00, MUXI, __x)
+#define MCDE_PBCBMRC01 0x00000C10
+#define MCDE_PBCBMRC01_MUXI_SHIFT 0
+#define MCDE_PBCBMRC01_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC01_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC01, MUXI, __x)
+#define MCDE_PBCBMRC02 0x00000C14
+#define MCDE_PBCBMRC02_MUXI_SHIFT 0
+#define MCDE_PBCBMRC02_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC02_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC02, MUXI, __x)
+#define MCDE_PBCBMRC03 0x00000C18
+#define MCDE_PBCBMRC03_MUXI_SHIFT 0
+#define MCDE_PBCBMRC03_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC03_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC03, MUXI, __x)
+#define MCDE_PBCBMRC04 0x00000C1C
+#define MCDE_PBCBMRC04_MUXI_SHIFT 0
+#define MCDE_PBCBMRC04_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC04_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC04, MUXI, __x)
+#define MCDE_PBCBMRC10 0x00000C20
+#define MCDE_PBCBMRC10_MUXI_SHIFT 0
+#define MCDE_PBCBMRC10_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC10_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC10, MUXI, __x)
+#define MCDE_PBCBMRC11 0x00000C24
+#define MCDE_PBCBMRC11_MUXI_SHIFT 0
+#define MCDE_PBCBMRC11_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC11_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC11, MUXI, __x)
+#define MCDE_PBCBMRC12 0x00000C28
+#define MCDE_PBCBMRC12_MUXI_SHIFT 0
+#define MCDE_PBCBMRC12_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC12_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC12, MUXI, __x)
+#define MCDE_PBCBMRC13 0x00000C2C
+#define MCDE_PBCBMRC13_MUXI_SHIFT 0
+#define MCDE_PBCBMRC13_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC13_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC13, MUXI, __x)
+#define MCDE_PBCBMRC14 0x00000C30
+#define MCDE_PBCBMRC14_MUXI_SHIFT 0
+#define MCDE_PBCBMRC14_MUXI_MASK 0xFFFFFFFF
+#define MCDE_PBCBMRC14_MUXI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBMRC14, MUXI, __x)
+#define MCDE_PBCBCRC00 0x00000C34
+#define MCDE_PBCBCRC00_GROUPOFFSET 0x4
+#define MCDE_PBCBCRC00_CTLI_SHIFT 0
+#define MCDE_PBCBCRC00_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC00_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC00, CTLI, __x)
+#define MCDE_PBCBCRC10 0x00000C38
+#define MCDE_PBCBCRC10_CTLI_SHIFT 0
+#define MCDE_PBCBCRC10_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC10_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC10, CTLI, __x)
+#define MCDE_PBCBCRC01 0x00000C48
+#define MCDE_PBCBCRC01_GROUPOFFSET 0x4
+#define MCDE_PBCBCRC01_CTLI_SHIFT 0
+#define MCDE_PBCBCRC01_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC01_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC01, CTLI, __x)
+#define MCDE_PBCBCRC11 0x00000C4C
+#define MCDE_PBCBCRC11_CTLI_SHIFT 0
+#define MCDE_PBCBCRC11_CTLI_MASK 0xFFFFFFFF
+#define MCDE_PBCBCRC11_CTLI(__x) \
+ MCDE_VAL2REG(MCDE_PBCBCRC11, CTLI, __x)
+#define MCDE_VSCRC0 0x00000C5C
+#define MCDE_VSCRC0_GROUPOFFSET 0x4
+#define MCDE_VSCRC0_VSPMIN_SHIFT 0
+#define MCDE_VSCRC0_VSPMIN_MASK 0x00000FFF
+#define MCDE_VSCRC0_VSPMIN(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPMIN, __x)
+#define MCDE_VSCRC0_VSPMAX_SHIFT 12
+#define MCDE_VSCRC0_VSPMAX_MASK 0x00FFF000
+#define MCDE_VSCRC0_VSPMAX(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPMAX, __x)
+#define MCDE_VSCRC0_VSPDIV_SHIFT 24
+#define MCDE_VSCRC0_VSPDIV_MASK 0x07000000
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_1 0
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_2 1
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_4 2
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_8 3
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_16 4
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_32 5
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_64 6
+#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_128 7
+#define MCDE_VSCRC0_VSPDIV_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPDIV, MCDE_VSCRC0_VSPDIV_##__x)
+#define MCDE_VSCRC0_VSPDIV(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPDIV, __x)
+#define MCDE_VSCRC0_VSPOL_SHIFT 27
+#define MCDE_VSCRC0_VSPOL_MASK 0x08000000
+#define MCDE_VSCRC0_VSPOL_ACTIVE_HIGH 0
+#define MCDE_VSCRC0_VSPOL_ACTIVE_LOW 1
+#define MCDE_VSCRC0_VSPOL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPOL, MCDE_VSCRC0_VSPOL_##__x)
+#define MCDE_VSCRC0_VSPOL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSPOL, __x)
+#define MCDE_VSCRC0_VSSEL_SHIFT 28
+#define MCDE_VSCRC0_VSSEL_MASK 0x10000000
+#define MCDE_VSCRC0_VSSEL_VSYNC0 0
+#define MCDE_VSCRC0_VSSEL_VSYNC1 1
+#define MCDE_VSCRC0_VSSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSSEL, MCDE_VSCRC0_VSSEL_##__x)
+#define MCDE_VSCRC0_VSSEL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSSEL, __x)
+#define MCDE_VSCRC0_VSDBL_SHIFT 29
+#define MCDE_VSCRC0_VSDBL_MASK 0xE0000000
+#define MCDE_VSCRC0_VSDBL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC0, VSDBL, __x)
+#define MCDE_VSCRC1 0x00000C60
+#define MCDE_VSCRC1_VSPMIN_SHIFT 0
+#define MCDE_VSCRC1_VSPMIN_MASK 0x00000FFF
+#define MCDE_VSCRC1_VSPMIN(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPMIN, __x)
+#define MCDE_VSCRC1_VSPMAX_SHIFT 12
+#define MCDE_VSCRC1_VSPMAX_MASK 0x00FFF000
+#define MCDE_VSCRC1_VSPMAX(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPMAX, __x)
+#define MCDE_VSCRC1_VSPDIV_SHIFT 24
+#define MCDE_VSCRC1_VSPDIV_MASK 0x07000000
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_1 0
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_2 1
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_4 2
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_8 3
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_16 4
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_32 5
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_64 6
+#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_128 7
+#define MCDE_VSCRC1_VSPDIV_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPDIV, MCDE_VSCRC1_VSPDIV_##__x)
+#define MCDE_VSCRC1_VSPDIV(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPDIV, __x)
+#define MCDE_VSCRC1_VSPOL_SHIFT 27
+#define MCDE_VSCRC1_VSPOL_MASK 0x08000000
+#define MCDE_VSCRC1_VSPOL_ACTIVE_HIGH 0
+#define MCDE_VSCRC1_VSPOL_ACTIVE_LOW 1
+#define MCDE_VSCRC1_VSPOL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPOL, MCDE_VSCRC1_VSPOL_##__x)
+#define MCDE_VSCRC1_VSPOL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSPOL, __x)
+#define MCDE_VSCRC1_VSSEL_SHIFT 28
+#define MCDE_VSCRC1_VSSEL_MASK 0x10000000
+#define MCDE_VSCRC1_VSSEL_VSYNC0 0
+#define MCDE_VSCRC1_VSSEL_VSYNC1 1
+#define MCDE_VSCRC1_VSSEL_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSSEL, MCDE_VSCRC1_VSSEL_##__x)
+#define MCDE_VSCRC1_VSSEL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSSEL, __x)
+#define MCDE_VSCRC1_VSDBL_SHIFT 29
+#define MCDE_VSCRC1_VSDBL_MASK 0xE0000000
+#define MCDE_VSCRC1_VSDBL(__x) \
+ MCDE_VAL2REG(MCDE_VSCRC1, VSDBL, __x)
+#define MCDE_SCTRC 0x00000C64
+#define MCDE_SCTRC_SYNCDELC0_SHIFT 0
+#define MCDE_SCTRC_SYNCDELC0_MASK 0x000000FF
+#define MCDE_SCTRC_SYNCDELC0(__x) \
+ MCDE_VAL2REG(MCDE_SCTRC, SYNCDELC0, __x)
+#define MCDE_SCTRC_SYNCDELC1_SHIFT 8
+#define MCDE_SCTRC_SYNCDELC1_MASK 0x0000FF00
+#define MCDE_SCTRC_SYNCDELC1(__x) \
+ MCDE_VAL2REG(MCDE_SCTRC, SYNCDELC1, __x)
+#define MCDE_SCTRC_TRDELC_SHIFT 16
+#define MCDE_SCTRC_TRDELC_MASK 0x0FFF0000
+#define MCDE_SCTRC_TRDELC(__x) \
+ MCDE_VAL2REG(MCDE_SCTRC, TRDELC, __x)
+#define MCDE_SCSRC 0x00000C68
+#define MCDE_SCSRC_VSTAC0_SHIFT 0
+#define MCDE_SCSRC_VSTAC0_MASK 0x00000001
+#define MCDE_SCSRC_VSTAC0(__x) \
+ MCDE_VAL2REG(MCDE_SCSRC, VSTAC0, __x)
+#define MCDE_SCSRC_VSTAC1_SHIFT 1
+#define MCDE_SCSRC_VSTAC1_MASK 0x00000002
+#define MCDE_SCSRC_VSTAC1(__x) \
+ MCDE_VAL2REG(MCDE_SCSRC, VSTAC1, __x)
+#define MCDE_BCNR0 0x00000C6C
+#define MCDE_BCNR0_GROUPOFFSET 0x4
+#define MCDE_BCNR0_BCN_SHIFT 0
+#define MCDE_BCNR0_BCN_MASK 0x000000FF
+#define MCDE_BCNR0_BCN(__x) \
+ MCDE_VAL2REG(MCDE_BCNR0, BCN, __x)
+#define MCDE_BCNR1 0x00000C70
+#define MCDE_BCNR1_BCN_SHIFT 0
+#define MCDE_BCNR1_BCN_MASK 0x000000FF
+#define MCDE_BCNR1_BCN(__x) \
+ MCDE_VAL2REG(MCDE_BCNR1, BCN, __x)
+#define MCDE_CSCDTR0 0x00000C74
+#define MCDE_CSCDTR0_GROUPOFFSET 0x4
+#define MCDE_CSCDTR0_CSACT_SHIFT 0
+#define MCDE_CSCDTR0_CSACT_MASK 0x000000FF
+#define MCDE_CSCDTR0_CSACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CSACT, __x)
+#define MCDE_CSCDTR0_CSDEACT_SHIFT 8
+#define MCDE_CSCDTR0_CSDEACT_MASK 0x0000FF00
+#define MCDE_CSCDTR0_CSDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CSDEACT, __x)
+#define MCDE_CSCDTR0_CDACT_SHIFT 16
+#define MCDE_CSCDTR0_CDACT_MASK 0x00FF0000
+#define MCDE_CSCDTR0_CDACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CDACT, __x)
+#define MCDE_CSCDTR0_CDDEACT_SHIFT 24
+#define MCDE_CSCDTR0_CDDEACT_MASK 0xFF000000
+#define MCDE_CSCDTR0_CDDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR0, CDDEACT, __x)
+#define MCDE_CSCDTR1 0x00000C78
+#define MCDE_CSCDTR1_CSACT_SHIFT 0
+#define MCDE_CSCDTR1_CSACT_MASK 0x000000FF
+#define MCDE_CSCDTR1_CSACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CSACT, __x)
+#define MCDE_CSCDTR1_CSDEACT_SHIFT 8
+#define MCDE_CSCDTR1_CSDEACT_MASK 0x0000FF00
+#define MCDE_CSCDTR1_CSDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CSDEACT, __x)
+#define MCDE_CSCDTR1_CDACT_SHIFT 16
+#define MCDE_CSCDTR1_CDACT_MASK 0x00FF0000
+#define MCDE_CSCDTR1_CDACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CDACT, __x)
+#define MCDE_CSCDTR1_CDDEACT_SHIFT 24
+#define MCDE_CSCDTR1_CDDEACT_MASK 0xFF000000
+#define MCDE_CSCDTR1_CDDEACT(__x) \
+ MCDE_VAL2REG(MCDE_CSCDTR1, CDDEACT, __x)
+#define MCDE_RDWRTR0 0x00000C7C
+#define MCDE_RDWRTR0_GROUPOFFSET 0x4
+#define MCDE_RDWRTR0_RWACT_SHIFT 0
+#define MCDE_RDWRTR0_RWACT_MASK 0x000000FF
+#define MCDE_RDWRTR0_RWACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR0, RWACT, __x)
+#define MCDE_RDWRTR0_RWDEACT_SHIFT 8
+#define MCDE_RDWRTR0_RWDEACT_MASK 0x0000FF00
+#define MCDE_RDWRTR0_RWDEACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR0, RWDEACT, __x)
+#define MCDE_RDWRTR0_MOTINT_SHIFT 16
+#define MCDE_RDWRTR0_MOTINT_MASK 0x00010000
+#define MCDE_RDWRTR0_MOTINT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR0, MOTINT, __x)
+#define MCDE_RDWRTR1 0x00000C80
+#define MCDE_RDWRTR1_RWACT_SHIFT 0
+#define MCDE_RDWRTR1_RWACT_MASK 0x000000FF
+#define MCDE_RDWRTR1_RWACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR1, RWACT, __x)
+#define MCDE_RDWRTR1_RWDEACT_SHIFT 8
+#define MCDE_RDWRTR1_RWDEACT_MASK 0x0000FF00
+#define MCDE_RDWRTR1_RWDEACT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR1, RWDEACT, __x)
+#define MCDE_RDWRTR1_MOTINT_SHIFT 16
+#define MCDE_RDWRTR1_MOTINT_MASK 0x00010000
+#define MCDE_RDWRTR1_MOTINT(__x) \
+ MCDE_VAL2REG(MCDE_RDWRTR1, MOTINT, __x)
+#define MCDE_DOTR0 0x00000C84
+#define MCDE_DOTR0_GROUPOFFSET 0x4
+#define MCDE_DOTR0_DOACT_SHIFT 0
+#define MCDE_DOTR0_DOACT_MASK 0x000000FF
+#define MCDE_DOTR0_DOACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR0, DOACT, __x)
+#define MCDE_DOTR0_DODEACT_SHIFT 8
+#define MCDE_DOTR0_DODEACT_MASK 0x0000FF00
+#define MCDE_DOTR0_DODEACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR0, DODEACT, __x)
+#define MCDE_DOTR1 0x00000C88
+#define MCDE_DOTR1_DOACT_SHIFT 0
+#define MCDE_DOTR1_DOACT_MASK 0x000000FF
+#define MCDE_DOTR1_DOACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR1, DOACT, __x)
+#define MCDE_DOTR1_DODEACT_SHIFT 8
+#define MCDE_DOTR1_DODEACT_MASK 0x0000FF00
+#define MCDE_DOTR1_DODEACT(__x) \
+ MCDE_VAL2REG(MCDE_DOTR1, DODEACT, __x)
+#define MCDE_WDATADC0 0x00000C94
+#define MCDE_WDATADC0_GROUPOFFSET 0x4
+#define MCDE_WDATADC0_DATAVALUE_SHIFT 0
+#define MCDE_WDATADC0_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_WDATADC0_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_WDATADC0, DATAVALUE, __x)
+#define MCDE_WDATADC0_DC_SHIFT 24
+#define MCDE_WDATADC0_DC_MASK 0x01000000
+#define MCDE_WDATADC0_DC(__x) \
+ MCDE_VAL2REG(MCDE_WDATADC0, DC, __x)
+#define MCDE_WDATADC1 0x00000C98
+#define MCDE_WDATADC1_DATAVALUE_SHIFT 0
+#define MCDE_WDATADC1_DATAVALUE_MASK 0x00FFFFFF
+#define MCDE_WDATADC1_DATAVALUE(__x) \
+ MCDE_VAL2REG(MCDE_WDATADC1, DATAVALUE, __x)
+#define MCDE_WDATADC1_DC_SHIFT 24
+#define MCDE_WDATADC1_DC_MASK 0x01000000
+#define MCDE_WDATADC1_DC(__x) \
+ MCDE_VAL2REG(MCDE_WDATADC1, DC, __x)
+#define MCDE_RDATADC0 0x00000C9C
+#define MCDE_RDATADC0_GROUPOFFSET 0x4
+#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE_SHIFT 0
+#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE_MASK 0x0000FFFF
+#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC0, DATAREADFROMDISPLAYMODULE, __x)
+#define MCDE_RDATADC0_STARTREAD_SHIFT 16
+#define MCDE_RDATADC0_STARTREAD_MASK 0x00010000
+#define MCDE_RDATADC0_STARTREAD(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC0, STARTREAD, __x)
+#define MCDE_RDATADC1 0x00000CA0
+#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE_SHIFT 0
+#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE_MASK 0x0000FFFF
+#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC1, DATAREADFROMDISPLAYMODULE, __x)
+#define MCDE_RDATADC1_STARTREAD_SHIFT 16
+#define MCDE_RDATADC1_STARTREAD_MASK 0x00010000
+#define MCDE_RDATADC1_STARTREAD(__x) \
+ MCDE_VAL2REG(MCDE_RDATADC1, STARTREAD, __x)
+#define MCDE_STATC 0x00000CA4
+#define MCDE_STATC_STATBUSY0_SHIFT 0
+#define MCDE_STATC_STATBUSY0_MASK 0x00000001
+#define MCDE_STATC_STATBUSY0(__x) \
+ MCDE_VAL2REG(MCDE_STATC, STATBUSY0, __x)
+#define MCDE_STATC_STATBUSY1_SHIFT 5
+#define MCDE_STATC_STATBUSY1_MASK 0x00000020
+#define MCDE_STATC_STATBUSY1(__x) \
+ MCDE_VAL2REG(MCDE_STATC, STATBUSY1, __x)
+#define MCDE_CTRLC0 0x00000CA8
+#define MCDE_CTRLC0_GROUPOFFSET 0x4
+#define MCDE_CTRLC0_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLC0_FIFOWTRMRK_MASK 0x000000FF
+#define MCDE_CTRLC0_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FIFOWTRMRK, __x)
+#define MCDE_CTRLC0_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLC0_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLC0_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FIFOEMPTY, __x)
+#define MCDE_CTRLC0_FIFOFULL_SHIFT 13
+#define MCDE_CTRLC0_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLC0_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FIFOFULL, __x)
+#define MCDE_CTRLC0_FORMID_SHIFT 16
+#define MCDE_CTRLC0_FORMID_MASK 0x00070000
+#define MCDE_CTRLC0_FORMID_DSI0VID 0
+#define MCDE_CTRLC0_FORMID_DSI0CMD 1
+#define MCDE_CTRLC0_FORMID_DSI1VID 2
+#define MCDE_CTRLC0_FORMID_DSI1CMD 3
+#define MCDE_CTRLC0_FORMID_DSI2VID 4
+#define MCDE_CTRLC0_FORMID_DSI2CMD 5
+#define MCDE_CTRLC0_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMID, MCDE_CTRLC0_FORMID_##__x)
+#define MCDE_CTRLC0_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMID, __x)
+#define MCDE_CTRLC0_FORMTYPE_SHIFT 20
+#define MCDE_CTRLC0_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLC0_FORMTYPE_DPITV 0
+#define MCDE_CTRLC0_FORMTYPE_DBI 1
+#define MCDE_CTRLC0_FORMTYPE_DSI 2
+#define MCDE_CTRLC0_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMTYPE, MCDE_CTRLC0_FORMTYPE_##__x)
+#define MCDE_CTRLC0_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC0, FORMTYPE, __x)
+#define MCDE_CTRLC1 0x00000CAC
+#define MCDE_CTRLC1_FIFOWTRMRK_SHIFT 0
+#define MCDE_CTRLC1_FIFOWTRMRK_MASK 0x000000FF
+#define MCDE_CTRLC1_FIFOWTRMRK(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FIFOWTRMRK, __x)
+#define MCDE_CTRLC1_FIFOEMPTY_SHIFT 12
+#define MCDE_CTRLC1_FIFOEMPTY_MASK 0x00001000
+#define MCDE_CTRLC1_FIFOEMPTY(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FIFOEMPTY, __x)
+#define MCDE_CTRLC1_FIFOFULL_SHIFT 13
+#define MCDE_CTRLC1_FIFOFULL_MASK 0x00002000
+#define MCDE_CTRLC1_FIFOFULL(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FIFOFULL, __x)
+#define MCDE_CTRLC1_FORMID_SHIFT 16
+#define MCDE_CTRLC1_FORMID_MASK 0x00070000
+#define MCDE_CTRLC1_FORMID_DSI0VID 0
+#define MCDE_CTRLC1_FORMID_DSI0CMD 1
+#define MCDE_CTRLC1_FORMID_DSI1VID 2
+#define MCDE_CTRLC1_FORMID_DSI1CMD 3
+#define MCDE_CTRLC1_FORMID_DSI2VID 4
+#define MCDE_CTRLC1_FORMID_DSI2CMD 5
+#define MCDE_CTRLC1_FORMID_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMID, MCDE_CTRLC1_FORMID_##__x)
+#define MCDE_CTRLC1_FORMID(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMID, __x)
+#define MCDE_CTRLC1_FORMTYPE_SHIFT 20
+#define MCDE_CTRLC1_FORMTYPE_MASK 0x00700000
+#define MCDE_CTRLC1_FORMTYPE_DPITV 0
+#define MCDE_CTRLC1_FORMTYPE_DBI 1
+#define MCDE_CTRLC1_FORMTYPE_DSI 2
+#define MCDE_CTRLC1_FORMTYPE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMTYPE, MCDE_CTRLC1_FORMTYPE_##__x)
+#define MCDE_CTRLC1_FORMTYPE(__x) \
+ MCDE_VAL2REG(MCDE_CTRLC1, FORMTYPE, __x)
+#define MCDE_DSIVID0CONF0 0x00000E00
+#define MCDE_DSIVID0CONF0_GROUPOFFSET 0x20
+#define MCDE_DSIVID0CONF0_BLANKING_SHIFT 0
+#define MCDE_DSIVID0CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSIVID0CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, BLANKING, __x)
+#define MCDE_DSIVID0CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSIVID0CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSIVID0CONF0_VID_MODE_CMD 0
+#define MCDE_DSIVID0CONF0_VID_MODE_VID 1
+#define MCDE_DSIVID0CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, VID_MODE, \
+ MCDE_DSIVID0CONF0_VID_MODE_##__x)
+#define MCDE_DSIVID0CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, VID_MODE, __x)
+#define MCDE_DSIVID0CONF0_CMD8_SHIFT 13
+#define MCDE_DSIVID0CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSIVID0CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, CMD8, __x)
+#define MCDE_DSIVID0CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSIVID0CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSIVID0CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, BIT_SWAP, __x)
+#define MCDE_DSIVID0CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSIVID0CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSIVID0CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, BYTE_SWAP, __x)
+#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSIVID0CONF0_PACKING_SHIFT 20
+#define MCDE_DSIVID0CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSIVID0CONF0_PACKING_RGB565 0
+#define MCDE_DSIVID0CONF0_PACKING_RGB666 1
+#define MCDE_DSIVID0CONF0_PACKING_RGB888 2
+#define MCDE_DSIVID0CONF0_PACKING_BGR888 3
+#define MCDE_DSIVID0CONF0_PACKING_HDTV 4
+#define MCDE_DSIVID0CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, PACKING, \
+ MCDE_DSIVID0CONF0_PACKING_##__x)
+#define MCDE_DSIVID0CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CONF0, PACKING, __x)
+#define MCDE_DSICMD0CONF0 0x00000E20
+#define MCDE_DSICMD0CONF0_BLANKING_SHIFT 0
+#define MCDE_DSICMD0CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSICMD0CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, BLANKING, __x)
+#define MCDE_DSICMD0CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSICMD0CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSICMD0CONF0_VID_MODE_CMD 0
+#define MCDE_DSICMD0CONF0_VID_MODE_VID 1
+#define MCDE_DSICMD0CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, VID_MODE, \
+ MCDE_DSICMD0CONF0_VID_MODE_##__x)
+#define MCDE_DSICMD0CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, VID_MODE, __x)
+#define MCDE_DSICMD0CONF0_CMD8_SHIFT 13
+#define MCDE_DSICMD0CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSICMD0CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, CMD8, __x)
+#define MCDE_DSICMD0CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSICMD0CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSICMD0CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, BIT_SWAP, __x)
+#define MCDE_DSICMD0CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSICMD0CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSICMD0CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, BYTE_SWAP, __x)
+#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSICMD0CONF0_PACKING_SHIFT 20
+#define MCDE_DSICMD0CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSICMD0CONF0_PACKING_RGB565 0
+#define MCDE_DSICMD0CONF0_PACKING_RGB666 1
+#define MCDE_DSICMD0CONF0_PACKING_RGB888 2
+#define MCDE_DSICMD0CONF0_PACKING_BGR888 3
+#define MCDE_DSICMD0CONF0_PACKING_HDTV 4
+#define MCDE_DSICMD0CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, PACKING, \
+ MCDE_DSICMD0CONF0_PACKING_##__x)
+#define MCDE_DSICMD0CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CONF0, PACKING, __x)
+#define MCDE_DSIVID1CONF0 0x00000E40
+#define MCDE_DSIVID1CONF0_BLANKING_SHIFT 0
+#define MCDE_DSIVID1CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSIVID1CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, BLANKING, __x)
+#define MCDE_DSIVID1CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSIVID1CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSIVID1CONF0_VID_MODE_CMD 0
+#define MCDE_DSIVID1CONF0_VID_MODE_VID 1
+#define MCDE_DSIVID1CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, VID_MODE, \
+ MCDE_DSIVID1CONF0_VID_MODE_##__x)
+#define MCDE_DSIVID1CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, VID_MODE, __x)
+#define MCDE_DSIVID1CONF0_CMD8_SHIFT 13
+#define MCDE_DSIVID1CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSIVID1CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, CMD8, __x)
+#define MCDE_DSIVID1CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSIVID1CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSIVID1CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, BIT_SWAP, __x)
+#define MCDE_DSIVID1CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSIVID1CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSIVID1CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, BYTE_SWAP, __x)
+#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSIVID1CONF0_PACKING_SHIFT 20
+#define MCDE_DSIVID1CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSIVID1CONF0_PACKING_RGB565 0
+#define MCDE_DSIVID1CONF0_PACKING_RGB666 1
+#define MCDE_DSIVID1CONF0_PACKING_RGB888 2
+#define MCDE_DSIVID1CONF0_PACKING_BGR888 3
+#define MCDE_DSIVID1CONF0_PACKING_HDTV 4
+#define MCDE_DSIVID1CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, PACKING, \
+ MCDE_DSIVID1CONF0_PACKING_##__x)
+#define MCDE_DSIVID1CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CONF0, PACKING, __x)
+#define MCDE_DSICMD1CONF0 0x00000E60
+#define MCDE_DSICMD1CONF0_BLANKING_SHIFT 0
+#define MCDE_DSICMD1CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSICMD1CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, BLANKING, __x)
+#define MCDE_DSICMD1CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSICMD1CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSICMD1CONF0_VID_MODE_CMD 0
+#define MCDE_DSICMD1CONF0_VID_MODE_VID 1
+#define MCDE_DSICMD1CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, VID_MODE, \
+ MCDE_DSICMD1CONF0_VID_MODE_##__x)
+#define MCDE_DSICMD1CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, VID_MODE, __x)
+#define MCDE_DSICMD1CONF0_CMD8_SHIFT 13
+#define MCDE_DSICMD1CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSICMD1CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, CMD8, __x)
+#define MCDE_DSICMD1CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSICMD1CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSICMD1CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, BIT_SWAP, __x)
+#define MCDE_DSICMD1CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSICMD1CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSICMD1CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, BYTE_SWAP, __x)
+#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSICMD1CONF0_PACKING_SHIFT 20
+#define MCDE_DSICMD1CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSICMD1CONF0_PACKING_RGB565 0
+#define MCDE_DSICMD1CONF0_PACKING_RGB666 1
+#define MCDE_DSICMD1CONF0_PACKING_RGB888 2
+#define MCDE_DSICMD1CONF0_PACKING_BGR888 3
+#define MCDE_DSICMD1CONF0_PACKING_HDTV 4
+#define MCDE_DSICMD1CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, PACKING, \
+ MCDE_DSICMD1CONF0_PACKING_##__x)
+#define MCDE_DSICMD1CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CONF0, PACKING, __x)
+#define MCDE_DSIVID2CONF0 0x00000E80
+#define MCDE_DSIVID2CONF0_BLANKING_SHIFT 0
+#define MCDE_DSIVID2CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSIVID2CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, BLANKING, __x)
+#define MCDE_DSIVID2CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSIVID2CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSIVID2CONF0_VID_MODE_CMD 0
+#define MCDE_DSIVID2CONF0_VID_MODE_VID 1
+#define MCDE_DSIVID2CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, VID_MODE, \
+ MCDE_DSIVID2CONF0_VID_MODE_##__x)
+#define MCDE_DSIVID2CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, VID_MODE, __x)
+#define MCDE_DSIVID2CONF0_CMD8_SHIFT 13
+#define MCDE_DSIVID2CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSIVID2CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, CMD8, __x)
+#define MCDE_DSIVID2CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSIVID2CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSIVID2CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, BIT_SWAP, __x)
+#define MCDE_DSIVID2CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSIVID2CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSIVID2CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, BYTE_SWAP, __x)
+#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSIVID2CONF0_PACKING_SHIFT 20
+#define MCDE_DSIVID2CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSIVID2CONF0_PACKING_RGB565 0
+#define MCDE_DSIVID2CONF0_PACKING_RGB666 1
+#define MCDE_DSIVID2CONF0_PACKING_RGB888 2
+#define MCDE_DSIVID2CONF0_PACKING_BGR888 3
+#define MCDE_DSIVID2CONF0_PACKING_HDTV 4
+#define MCDE_DSIVID2CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, PACKING, \
+ MCDE_DSIVID2CONF0_PACKING_##__x)
+#define MCDE_DSIVID2CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CONF0, PACKING, __x)
+#define MCDE_DSICMD2CONF0 0x00000EA0
+#define MCDE_DSICMD2CONF0_BLANKING_SHIFT 0
+#define MCDE_DSICMD2CONF0_BLANKING_MASK 0x000000FF
+#define MCDE_DSICMD2CONF0_BLANKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, BLANKING, __x)
+#define MCDE_DSICMD2CONF0_VID_MODE_SHIFT 12
+#define MCDE_DSICMD2CONF0_VID_MODE_MASK 0x00001000
+#define MCDE_DSICMD2CONF0_VID_MODE_CMD 0
+#define MCDE_DSICMD2CONF0_VID_MODE_VID 1
+#define MCDE_DSICMD2CONF0_VID_MODE_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, VID_MODE, \
+ MCDE_DSICMD2CONF0_VID_MODE_##__x)
+#define MCDE_DSICMD2CONF0_VID_MODE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, VID_MODE, __x)
+#define MCDE_DSICMD2CONF0_CMD8_SHIFT 13
+#define MCDE_DSICMD2CONF0_CMD8_MASK 0x00002000
+#define MCDE_DSICMD2CONF0_CMD8(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, CMD8, __x)
+#define MCDE_DSICMD2CONF0_BIT_SWAP_SHIFT 16
+#define MCDE_DSICMD2CONF0_BIT_SWAP_MASK 0x00010000
+#define MCDE_DSICMD2CONF0_BIT_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, BIT_SWAP, __x)
+#define MCDE_DSICMD2CONF0_BYTE_SWAP_SHIFT 17
+#define MCDE_DSICMD2CONF0_BYTE_SWAP_MASK 0x00020000
+#define MCDE_DSICMD2CONF0_BYTE_SWAP(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, BYTE_SWAP, __x)
+#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN_SHIFT 18
+#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN_MASK 0x00040000
+#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, DCSVID_NOTGEN, __x)
+#define MCDE_DSICMD2CONF0_PACKING_SHIFT 20
+#define MCDE_DSICMD2CONF0_PACKING_MASK 0x00700000
+#define MCDE_DSICMD2CONF0_PACKING_RGB565 0
+#define MCDE_DSICMD2CONF0_PACKING_RGB666 1
+#define MCDE_DSICMD2CONF0_PACKING_RGB888 2
+#define MCDE_DSICMD2CONF0_PACKING_BGR888 3
+#define MCDE_DSICMD2CONF0_PACKING_HDTV 4
+#define MCDE_DSICMD2CONF0_PACKING_ENUM(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, PACKING, \
+ MCDE_DSICMD2CONF0_PACKING_##__x)
+#define MCDE_DSICMD2CONF0_PACKING(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CONF0, PACKING, __x)
+#define MCDE_DSIVID0FRAME 0x00000E04
+#define MCDE_DSIVID0FRAME_GROUPOFFSET 0x20
+#define MCDE_DSIVID0FRAME_FRAME_SHIFT 0
+#define MCDE_DSIVID0FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSIVID0FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0FRAME, FRAME, __x)
+#define MCDE_DSICMD0FRAME 0x00000E24
+#define MCDE_DSICMD0FRAME_FRAME_SHIFT 0
+#define MCDE_DSICMD0FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSICMD0FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0FRAME, FRAME, __x)
+#define MCDE_DSIVID1FRAME 0x00000E44
+#define MCDE_DSIVID1FRAME_FRAME_SHIFT 0
+#define MCDE_DSIVID1FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSIVID1FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1FRAME, FRAME, __x)
+#define MCDE_DSICMD1FRAME 0x00000E64
+#define MCDE_DSICMD1FRAME_FRAME_SHIFT 0
+#define MCDE_DSICMD1FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSICMD1FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1FRAME, FRAME, __x)
+#define MCDE_DSIVID2FRAME 0x00000E84
+#define MCDE_DSIVID2FRAME_FRAME_SHIFT 0
+#define MCDE_DSIVID2FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSIVID2FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2FRAME, FRAME, __x)
+#define MCDE_DSICMD2FRAME 0x00000EA4
+#define MCDE_DSICMD2FRAME_FRAME_SHIFT 0
+#define MCDE_DSICMD2FRAME_FRAME_MASK 0x00FFFFFF
+#define MCDE_DSICMD2FRAME_FRAME(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2FRAME, FRAME, __x)
+#define MCDE_DSIVID0PKT 0x00000E08
+#define MCDE_DSIVID0PKT_GROUPOFFSET 0x20
+#define MCDE_DSIVID0PKT_PACKET_SHIFT 0
+#define MCDE_DSIVID0PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSIVID0PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0PKT, PACKET, __x)
+#define MCDE_DSICMD0PKT 0x00000E28
+#define MCDE_DSICMD0PKT_PACKET_SHIFT 0
+#define MCDE_DSICMD0PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSICMD0PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0PKT, PACKET, __x)
+#define MCDE_DSIVID1PKT 0x00000E48
+#define MCDE_DSIVID1PKT_PACKET_SHIFT 0
+#define MCDE_DSIVID1PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSIVID1PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1PKT, PACKET, __x)
+#define MCDE_DSICMD1PKT 0x00000E68
+#define MCDE_DSICMD1PKT_PACKET_SHIFT 0
+#define MCDE_DSICMD1PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSICMD1PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1PKT, PACKET, __x)
+#define MCDE_DSIVID2PKT 0x00000E88
+#define MCDE_DSIVID2PKT_PACKET_SHIFT 0
+#define MCDE_DSIVID2PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSIVID2PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2PKT, PACKET, __x)
+#define MCDE_DSICMD2PKT 0x00000EA8
+#define MCDE_DSICMD2PKT_PACKET_SHIFT 0
+#define MCDE_DSICMD2PKT_PACKET_MASK 0x0000FFFF
+#define MCDE_DSICMD2PKT_PACKET(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2PKT, PACKET, __x)
+#define MCDE_DSIVID0SYNC 0x00000E0C
+#define MCDE_DSIVID0SYNC_GROUPOFFSET 0x20
+#define MCDE_DSIVID0SYNC_DMA_SHIFT 0
+#define MCDE_DSIVID0SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSIVID0SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0SYNC, DMA, __x)
+#define MCDE_DSIVID0SYNC_SW_SHIFT 16
+#define MCDE_DSIVID0SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSIVID0SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0SYNC, SW, __x)
+#define MCDE_DSICMD0SYNC 0x00000E2C
+#define MCDE_DSICMD0SYNC_DMA_SHIFT 0
+#define MCDE_DSICMD0SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSICMD0SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0SYNC, DMA, __x)
+#define MCDE_DSICMD0SYNC_SW_SHIFT 16
+#define MCDE_DSICMD0SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSICMD0SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0SYNC, SW, __x)
+#define MCDE_DSIVID1SYNC 0x00000E4C
+#define MCDE_DSIVID1SYNC_DMA_SHIFT 0
+#define MCDE_DSIVID1SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSIVID1SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1SYNC, DMA, __x)
+#define MCDE_DSIVID1SYNC_SW_SHIFT 16
+#define MCDE_DSIVID1SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSIVID1SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1SYNC, SW, __x)
+#define MCDE_DSICMD1SYNC 0x00000E6C
+#define MCDE_DSICMD1SYNC_DMA_SHIFT 0
+#define MCDE_DSICMD1SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSICMD1SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1SYNC, DMA, __x)
+#define MCDE_DSICMD1SYNC_SW_SHIFT 16
+#define MCDE_DSICMD1SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSICMD1SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1SYNC, SW, __x)
+#define MCDE_DSIVID2SYNC 0x00000E8C
+#define MCDE_DSIVID2SYNC_DMA_SHIFT 0
+#define MCDE_DSIVID2SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSIVID2SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2SYNC, DMA, __x)
+#define MCDE_DSIVID2SYNC_SW_SHIFT 16
+#define MCDE_DSIVID2SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSIVID2SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2SYNC, SW, __x)
+#define MCDE_DSICMD2SYNC 0x00000EAC
+#define MCDE_DSICMD2SYNC_DMA_SHIFT 0
+#define MCDE_DSICMD2SYNC_DMA_MASK 0x00000FFF
+#define MCDE_DSICMD2SYNC_DMA(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2SYNC, DMA, __x)
+#define MCDE_DSICMD2SYNC_SW_SHIFT 16
+#define MCDE_DSICMD2SYNC_SW_MASK 0x0FFF0000
+#define MCDE_DSICMD2SYNC_SW(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2SYNC, SW, __x)
+#define MCDE_DSIVID0CMDW 0x00000E10
+#define MCDE_DSIVID0CMDW_GROUPOFFSET 0x20
+#define MCDE_DSIVID0CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSIVID0CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSIVID0CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSIVID0CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSIVID0CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSIVID0CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0CMDW, CMDW_START, __x)
+#define MCDE_DSICMD0CMDW 0x00000E30
+#define MCDE_DSICMD0CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSICMD0CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSICMD0CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSICMD0CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSICMD0CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSICMD0CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0CMDW, CMDW_START, __x)
+#define MCDE_DSIVID1CMDW 0x00000E50
+#define MCDE_DSIVID1CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSIVID1CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSIVID1CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSIVID1CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSIVID1CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSIVID1CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1CMDW, CMDW_START, __x)
+#define MCDE_DSICMD1CMDW 0x00000E70
+#define MCDE_DSICMD1CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSICMD1CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSICMD1CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSICMD1CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSICMD1CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSICMD1CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1CMDW, CMDW_START, __x)
+#define MCDE_DSIVID2CMDW 0x00000E90
+#define MCDE_DSIVID2CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSIVID2CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSIVID2CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSIVID2CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSIVID2CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSIVID2CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2CMDW, CMDW_START, __x)
+#define MCDE_DSICMD2CMDW 0x00000EB0
+#define MCDE_DSICMD2CMDW_CMDW_CONTINUE_SHIFT 0
+#define MCDE_DSICMD2CMDW_CMDW_CONTINUE_MASK 0x0000FFFF
+#define MCDE_DSICMD2CMDW_CMDW_CONTINUE(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CMDW, CMDW_CONTINUE, __x)
+#define MCDE_DSICMD2CMDW_CMDW_START_SHIFT 16
+#define MCDE_DSICMD2CMDW_CMDW_START_MASK 0xFFFF0000
+#define MCDE_DSICMD2CMDW_CMDW_START(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2CMDW, CMDW_START, __x)
+#define MCDE_DSIVID0DELAY0 0x00000E14
+#define MCDE_DSIVID0DELAY0_GROUPOFFSET 0x20
+#define MCDE_DSIVID0DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSIVID0DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSIVID0DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0DELAY0, INTPKTDEL, __x)
+#define MCDE_DSICMD0DELAY0 0x00000E34
+#define MCDE_DSICMD0DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSICMD0DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSICMD0DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0DELAY0, INTPKTDEL, __x)
+#define MCDE_DSIVID1DELAY0 0x00000E54
+#define MCDE_DSIVID1DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSIVID1DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSIVID1DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1DELAY0, INTPKTDEL, __x)
+#define MCDE_DSICMD1DELAY0 0x00000E74
+#define MCDE_DSICMD1DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSICMD1DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSICMD1DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1DELAY0, INTPKTDEL, __x)
+#define MCDE_DSIVID2DELAY0 0x00000E94
+#define MCDE_DSIVID2DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSIVID2DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSIVID2DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2DELAY0, INTPKTDEL, __x)
+#define MCDE_DSICMD2DELAY0 0x00000EB4
+#define MCDE_DSICMD2DELAY0_INTPKTDEL_SHIFT 0
+#define MCDE_DSICMD2DELAY0_INTPKTDEL_MASK 0x0000FFFF
+#define MCDE_DSICMD2DELAY0_INTPKTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2DELAY0, INTPKTDEL, __x)
+#define MCDE_DSIVID0DELAY1 0x00000E18
+#define MCDE_DSIVID0DELAY1_GROUPOFFSET 0x20
+#define MCDE_DSIVID0DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSIVID0DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSIVID0DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0DELAY1, TEREQDEL, __x)
+#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID0DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSICMD0DELAY1 0x00000E38
+#define MCDE_DSICMD0DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSICMD0DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSICMD0DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0DELAY1, TEREQDEL, __x)
+#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD0DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSIVID1DELAY1 0x00000E58
+#define MCDE_DSIVID1DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSIVID1DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSIVID1DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1DELAY1, TEREQDEL, __x)
+#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID1DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSICMD1DELAY1 0x00000E78
+#define MCDE_DSICMD1DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSICMD1DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSICMD1DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1DELAY1, TEREQDEL, __x)
+#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD1DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSIVID2DELAY1 0x00000E98
+#define MCDE_DSIVID2DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSIVID2DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSIVID2DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2DELAY1, TEREQDEL, __x)
+#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSIVID2DELAY1, FRAMESTARTDEL, __x)
+#define MCDE_DSICMD2DELAY1 0x00000EB8
+#define MCDE_DSICMD2DELAY1_TEREQDEL_SHIFT 0
+#define MCDE_DSICMD2DELAY1_TEREQDEL_MASK 0x00000FFF
+#define MCDE_DSICMD2DELAY1_TEREQDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2DELAY1, TEREQDEL, __x)
+#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL_SHIFT 16
+#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL_MASK 0x00FF0000
+#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL(__x) \
+ MCDE_VAL2REG(MCDE_DSICMD2DELAY1, FRAMESTARTDEL, __x)
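The register map above follows one uniform pattern: every field gets a _SHIFT/_MASK pair, a MCDE_VAL2REG() accessor, and, where the field has named values, a _ENUM variant that token-pastes the value name. As an illustrative sketch only (MCDE_VAL2REG() is defined earlier in this header and is assumed here to expand to (((__x) << SHIFT) & MASK); mcde_base is a hypothetical ioremap()ed register base), a channel-A formatter control word could be composed like this:

	/*
	 * Hypothetical usage sketch, not part of this patch.
	 * Assumes <linux/io.h> for writel() and a mapped mcde_base.
	 */
	u32 ctrl = MCDE_CTRLA_FIFOWTRMRK(16)		/* FIFO watermark, bits 9:0 */
		 | MCDE_CTRLA_FORMTYPE_ENUM(DSI)	/* formatter type, bits 22:20 */
		 | MCDE_CTRLA_FORMID_ENUM(DSI0VID);	/* formatter id, bits 18:16 */
	writel(ctrl, mcde_base + MCDE_CTRLA);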
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 37096246c93..14e568be223 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -349,6 +349,22 @@ config IMX2_WDT
To compile this driver as a module, choose M here: the
module will be called imx2_wdt.
+config UX500_WATCHDOG
+ bool "ST-Ericsson Ux500 watchdog"
+ depends on UX500_SOC_DB8500 || UX500_SOC_DB5500
+ default y
+ help
+ Say Y here to include watchdog timer support for the
+ watchdog in the PRCMU of ST-Ericsson Ux500 series platforms.
+ This watchdog is used to reset the system and thus cannot be
+ compiled as a module.
+
+config UX500_WATCHDOG_DEBUG
+ bool "ST-Ericsson Ux500 watchdog DEBUG"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && DEBUG_FS
+ help
+ Say Y here to add various debugfs entries under wdog/.
+
# AVR32 Architecture
config AT32AP700X_WDT
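The two new Kconfig entries are plain bools; a minimal sketch of the resulting .config fragment, assuming a Ux500 defconfig with DEBUG_FS already enabled (which UX500_WATCHDOG_DEBUG depends on):

	CONFIG_UX500_WATCHDOG=y
	CONFIG_UX500_WATCHDOG_DEBUG=y

UX500_WATCHDOG is a bool rather than a tristate because, as its help text notes, a watchdog that resets the system cannot safely be unloaded as a module.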
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index e8f479a1640..738a0f3ad21 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
+obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 7c741dc987b..96ae2465c25 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -35,11 +35,13 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/kexec.h>
#include <asm/smp_twd.h>
struct mpcore_wdt {
- unsigned long timer_alive;
+ cpumask_t timer_alive;
struct device *dev;
void __iomem *base;
int irq;
@@ -50,6 +52,8 @@ struct mpcore_wdt {
static struct platform_device *mpcore_wdt_pdev;
static DEFINE_SPINLOCK(wdt_lock);
+static DEFINE_PER_CPU(unsigned long, mpcore_wdt_rate);
+
#define TIMER_MARGIN 60
static int mpcore_margin = TIMER_MARGIN;
module_param(mpcore_margin, int, 0);
@@ -70,6 +74,8 @@ MODULE_PARM_DESC(mpcore_noboot, "MPcore watchdog action, "
"set to 1 to ignore reboots, 0 to reboot (default="
__MODULE_STRING(ONLY_TESTING) ")");
+#define MPCORE_WDT_PERIPHCLK_PRESCALER 2
+
/*
* This is the interrupt handler. Note that we only use this
* in testing mode, so don't actually do a reboot here.
@@ -102,9 +108,8 @@ static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt)
spin_lock(&wdt_lock);
/* Assume prescale is set to 256 */
- count = __raw_readl(wdt->base + TWD_WDOG_COUNTER);
- count = (0xFFFFFFFFU - count) * (HZ / 5);
- count = (count / 256) * mpcore_margin;
+ count = per_cpu(mpcore_wdt_rate, smp_processor_id()) / 256;
+ count = count * mpcore_margin;
/* Reload the counter */
writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
@@ -112,6 +117,56 @@ static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt)
spin_unlock(&wdt_lock);
}
+static void mpcore_wdt_set_rate(unsigned long new_rate)
+{
+ unsigned long count;
+ unsigned long long rate_tmp;
+ unsigned long old_rate;
+
+ spin_lock(&wdt_lock);
+ old_rate = per_cpu(mpcore_wdt_rate, smp_processor_id());
+ per_cpu(mpcore_wdt_rate, smp_processor_id()) = new_rate;
+
+ if (mpcore_wdt_pdev) {
+ struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_pdev);
+ count = readl(wdt->base + TWD_WDOG_COUNTER);
+ /* The goal: count = count * (new_rate/old_rate); */
+ rate_tmp = (unsigned long long)count * new_rate;
+ do_div(rate_tmp, old_rate);
+ count = rate_tmp;
+ writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
+ wdt->perturb = wdt->perturb ? 0 : 1;
+ }
+ spin_unlock(&wdt_lock);
+}
+
+static void mpcore_wdt_update_cpu_frequency_on_cpu(void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ mpcore_wdt_set_rate((freq->new * 1000) /
+ MPCORE_WDT_PERIPHCLK_PRESCALER);
+}
+
+static int mpcore_wdt_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+
+ if (event == CPUFREQ_RESUMECHANGE ||
+ (event == CPUFREQ_PRECHANGE && freq->new > freq->old) ||
+ (event == CPUFREQ_POSTCHANGE && freq->new < freq->old))
+ smp_call_function_single(freq->cpu,
+ mpcore_wdt_update_cpu_frequency_on_cpu,
+ freq, 1);
+
+ return 0;
+}
+
+static struct notifier_block mpcore_wdt_cpufreq_notifier_block = {
+ .notifier_call = mpcore_wdt_cpufreq_notifier,
+};
+
+
static void mpcore_wdt_stop(struct mpcore_wdt *wdt)
{
spin_lock(&wdt_lock);
@@ -146,6 +201,20 @@ static int mpcore_wdt_set_heartbeat(int t)
return 0;
}
+static int mpcore_wdt_stop_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_pdev);
+ printk(KERN_INFO "Stopping watchdog on non-crashing core %u\n",
+ smp_processor_id());
+ mpcore_wdt_stop(wdt);
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block mpcore_wdt_stop_block = {
+ .notifier_call = mpcore_wdt_stop_notifier,
+};
+
/*
* /dev/watchdog handling
*/
@@ -153,7 +222,7 @@ static int mpcore_wdt_open(struct inode *inode, struct file *file)
{
struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_pdev);
- if (test_and_set_bit(0, &wdt->timer_alive))
+ if (cpumask_test_and_set_cpu(smp_processor_id(), &wdt->timer_alive))
return -EBUSY;
if (nowayout)
@@ -161,6 +230,9 @@ static int mpcore_wdt_open(struct inode *inode, struct file *file)
file->private_data = wdt;
+ atomic_notifier_chain_register(&crash_percpu_notifier_list,
+ &mpcore_wdt_stop_block);
+
/*
* Activate timer
*/
@@ -184,7 +256,7 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file)
"unexpected close, not stopping watchdog!\n");
mpcore_wdt_keepalive(wdt);
}
- clear_bit(0, &wdt->timer_alive);
+ cpumask_clear_cpu(smp_processor_id(), &wdt->timer_alive);
wdt->expect_close = 0;
return 0;
}
@@ -427,6 +499,8 @@ static struct platform_driver mpcore_wdt_driver = {
static int __init mpcore_wdt_init(void)
{
+ int i;
+
/*
* Check that the margin value is within its range;
* if not, reset to the default
@@ -437,6 +511,18 @@ static int __init mpcore_wdt_init(void)
TIMER_MARGIN);
}
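+ /*
+ * Listen for cpufreq transitions and seed each core's TWD rate from
+ * its current frequency (cpufreq_get() returns kHz, hence * 1000,
+ * divided by the periphclk prescaler).
+ */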
+ cpufreq_register_notifier(&mpcore_wdt_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ for_each_online_cpu(i)
+ per_cpu(mpcore_wdt_rate, i) =
+ (cpufreq_get(i) * 1000) / MPCORE_WDT_PERIPHCLK_PRESCALER;
+
+ for_each_online_cpu(i)
+ pr_info("mpcore_wdt: rate for core %d is %lu.%02luMHz.\n", i,
+ per_cpu(mpcore_wdt_rate, i) / 1000000,
+ (per_cpu(mpcore_wdt_rate, i) / 10000) % 100);
+
pr_info("MPcore Watchdog Timer: 0.1. mpcore_noboot=%d mpcore_margin=%d sec (nowayout= %d)\n",
mpcore_noboot, mpcore_margin, nowayout);
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
new file mode 100644
index 00000000000..a1e8c2dbf10
--- /dev/null
+++ b/drivers/watchdog/ux500_wdt.c
@@ -0,0 +1,454 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * Heavily based upon geodewdt.c
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#define WATCHDOG_TIMEOUT 600 /* 10 minutes */
+
+#define WDT_FLAGS_OPEN 1
+#define WDT_FLAGS_ORPHAN 2
+
+static unsigned long wdt_flags;
+
+static int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+static u8 wdog_id;
+static bool wdt_en;
+static bool wdt_auto_off;
+static bool safe_close;
+
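+/*
+ * The actual watchdog sits behind the PRCMU; this driver only proxies
+ * configure/load/enable/kick requests through the prcmu_* API.
+ */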
+static int ux500_wdt_open(struct inode *inode, struct file *file)
+{
+ if (!timeout)
+ return -ENODEV;
+
+ if (test_and_set_bit(WDT_FLAGS_OPEN, &wdt_flags))
+ return -EBUSY;
+
+ if (!test_and_clear_bit(WDT_FLAGS_ORPHAN, &wdt_flags))
+ __module_get(THIS_MODULE);
+
+ prcmu_enable_a9wdog(wdog_id);
+ wdt_en = true;
+
+ return nonseekable_open(inode, file);
+}
+
+static int ux500_wdt_release(struct inode *inode, struct file *file)
+{
+ if (safe_close) {
+ prcmu_disable_a9wdog(wdog_id);
+ module_put(THIS_MODULE);
+ } else {
+ pr_crit("Unexpected close - watchdog is not stopping.\n");
+ prcmu_kick_a9wdog(wdog_id);
+
+ set_bit(WDT_FLAGS_ORPHAN, &wdt_flags);
+ }
+
+ clear_bit(WDT_FLAGS_OPEN, &wdt_flags);
+ safe_close = false;
+ return 0;
+}
+
+static ssize_t ux500_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ if (!len)
+ return len;
+
+ if (!nowayout) {
+ size_t i;
+ safe_close = false;
+
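+ /* A 'V' anywhere in the written data arms the magic close */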
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+
+ if (c == 'V')
+ safe_close = true;
+ }
+ }
+
+ prcmu_kick_a9wdog(wdog_id);
+
+ return len;
+}
+
+static long ux500_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int interval;
+
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .firmware_version = 1,
+ .identity = "Ux500 WDT",
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+
+ case WDIOC_SETOPTIONS:
+ {
+ int options;
+ int ret = -EINVAL;
+
+ if (get_user(options, p))
+ return -EFAULT;
+
+ if (options & WDIOS_DISABLECARD) {
+ prcmu_disable_a9wdog(wdog_id);
+ wdt_en = false;
+ ret = 0;
+ }
+
+ if (options & WDIOS_ENABLECARD) {
+ prcmu_enable_a9wdog(wdog_id);
+ wdt_en = true;
+ ret = 0;
+ }
+
+ return ret;
+ }
+ case WDIOC_KEEPALIVE:
+ return prcmu_kick_a9wdog(wdog_id);
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(interval, p))
+ return -EFAULT;
+
+ if (cpu_is_u8500()) {
+ /* 28 bit resolution in ms, becomes 268435.455 s */
+ if (interval > 268435 || interval < 0)
+ return -EINVAL;
+ } else if (cpu_is_u5500()) {
+ /* 32 bit resolution in ms, becomes 4294967.295 s */
+ if (interval > 4294967 || interval < 0)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
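+ /* Reload with the dog stopped; prcmu_load_a9wdog() takes ms */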
+ timeout = interval;
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+
+ /* Fall through */
+ case WDIOC_GETTIMEOUT:
+ return put_user(timeout, p);
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static const struct file_operations ux500_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = ux500_wdt_write,
+ .unlocked_ioctl = ux500_wdt_ioctl,
+ .open = ux500_wdt_open,
+ .release = ux500_wdt_release,
+};
+
+static struct miscdevice ux500_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &ux500_wdt_fops,
+};
+
+#ifdef CONFIG_UX500_WATCHDOG_DEBUG
+enum wdog_dbg {
+ WDOG_DBG_CONFIG,
+ WDOG_DBG_LOAD,
+ WDOG_DBG_KICK,
+ WDOG_DBG_EN,
+ WDOG_DBG_DIS,
+};
+
+static ssize_t wdog_dbg_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long val;
+ int err;
+ enum wdog_dbg v = (enum wdog_dbg)((struct seq_file *)
+ (file->private_data))->private;
+
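+ /* Which debugfs file was written is encoded in the seq_file private data */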
+ switch (v) {
+ case WDOG_DBG_CONFIG:
+ err = kstrtoul_from_user(user_buf, count, 0, &val);
+
+ if (!err) {
+ wdt_auto_off = val != 0;
+ (void) prcmu_config_a9wdog(1, wdt_auto_off);
+ } else {
+ pr_err("ux500_wdt:dbg: unknown value\n");
+ }
+ break;
+ case WDOG_DBG_LOAD:
+ err = kstrtoul_from_user(user_buf, count, 0, &val);
+
+ if (!err) {
+ timeout = val;
+ /* Convert seconds to ms */
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+ } else {
+ pr_err("ux500_wdt:dbg: unknown value\n");
+ }
+ break;
+ case WDOG_DBG_KICK:
+ (void) prcmu_kick_a9wdog(wdog_id);
+ break;
+ case WDOG_DBG_EN:
+ wdt_en = true;
+ (void) prcmu_enable_a9wdog(wdog_id);
+ break;
+ case WDOG_DBG_DIS:
+ wdt_en = false;
+ (void) prcmu_disable_a9wdog(wdog_id);
+ break;
+ }
+
+ return count;
+}
+
+static int wdog_dbg_read(struct seq_file *s, void *p)
+{
+ enum wdog_dbg v = (enum wdog_dbg)s->private;
+
+ switch (v) {
+ case WDOG_DBG_CONFIG:
+ seq_printf(s,"wdog is on id %d, auto off on sleep: %s\n",
+ (int)wdog_id,
+ wdt_auto_off ? "enabled": "disabled");
+ break;
+ case WDOG_DBG_LOAD:
+ /* In seconds */
+ seq_printf(s, "wdog load is: %d s\n", timeout);
+ break;
+ case WDOG_DBG_KICK:
+ break;
+ case WDOG_DBG_EN:
+ case WDOG_DBG_DIS:
+ seq_printf(s, "wdog is %sabled\n",
+ wdt_en ? "en" : "dis");
+ break;
+ }
+ return 0;
+}
+
+static int wdog_dbg_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, wdog_dbg_read, inode->i_private);
+}
+
+static const struct file_operations wdog_dbg_fops = {
+ .open = wdog_dbg_open,
+ .write = wdog_dbg_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int __init wdog_dbg_init(void)
+{
+ struct dentry *wdog_dir;
+
+ wdog_dir = debugfs_create_dir("wdog", NULL);
+ if (IS_ERR_OR_NULL(wdog_dir))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_u8("id",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ &wdog_id)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("config",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_CONFIG,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("load",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_LOAD,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("kick",
+ S_IWUGO, wdog_dir,
+ (void *)WDOG_DBG_KICK,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("enable",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_EN,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("disable",
+ S_IWUGO | S_IRUGO, wdog_dir,
+ (void *)WDOG_DBG_DIS,
+ &wdog_dbg_fops)))
+ goto fail;
+
+ return 0;
+fail:
+ pr_err("ux500:wdog: Failed to initialize wdog dbg\n");
+ debugfs_remove_recursive(wdog_dir);
+
+ return -EFAULT;
+}
+
+#else
+static inline int __init wdog_dbg_init(void)
+{
+ return 0;
+}
+#endif
+
+static int __init ux500_wdt_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ /* Number of watchdogs */
+ prcmu_config_a9wdog(1, wdt_auto_off);
+ /* Convert seconds to ms */
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+
+ ret = misc_register(&ux500_wdt_miscdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register misc\n");
+ return ret;
+ }
+
+ ret = wdog_dbg_init();
+ if (ret < 0)
+ goto fail;
+
+ dev_info(&pdev->dev, "initialized\n");
+
+ return 0;
+fail:
+ misc_deregister(&ux500_wdt_miscdev);
+ return ret;
+}
+
+static int __exit ux500_wdt_remove(struct platform_device *dev)
+{
+ prcmu_disable_a9wdog(wdog_id);
+ wdt_en = false;
+ misc_deregister(&ux500_wdt_miscdev);
+ return 0;
+}
+#ifdef CONFIG_PM
+static int ux500_wdt_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
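+ /*
+ * On u5500 simply stop the dog across suspend; otherwise, if it is
+ * not already set to pause in deep sleep, temporarily reconfigure it
+ * to auto-off so it cannot bite while suspended.
+ */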
+ if (wdt_en && cpu_is_u5500()) {
+ prcmu_disable_a9wdog(wdog_id);
+ return 0;
+ }
+
+ if (wdt_en && !wdt_auto_off) {
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_config_a9wdog(1, true);
+
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+ }
+ return 0;
+}
+
+static int ux500_wdt_resume(struct platform_device *pdev)
+{
+ if (wdt_en && cpu_is_u5500()) {
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+ return 0;
+ }
+
+ if (wdt_en && !wdt_auto_off) {
+ prcmu_disable_a9wdog(wdog_id);
+ prcmu_config_a9wdog(1, wdt_auto_off);
+
+ prcmu_load_a9wdog(wdog_id, timeout * 1000);
+ prcmu_enable_a9wdog(wdog_id);
+ }
+ return 0;
+}
+
+#else
+#define ux500_wdt_suspend NULL
+#define ux500_wdt_resume NULL
+#endif
+static struct platform_driver ux500_wdt_driver = {
+ .remove = __exit_p(ux500_wdt_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ux500_wdt",
+ },
+ .suspend = ux500_wdt_suspend,
+ .resume = ux500_wdt_resume,
+};
+
+static int __init ux500_wdt_init(void)
+{
+ return platform_driver_probe(&ux500_wdt_driver, ux500_wdt_probe);
+}
+module_init(ux500_wdt_init);
+
+MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>");
+MODULE_DESCRIPTION("Ux500 Watchdog Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);